2024-11-28 09:20:55,626 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-28 09:20:55,643 main DEBUG Took 0.014928 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-28 09:20:55,643 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-28 09:20:55,644 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-28 09:20:55,645 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-28 09:20:55,647 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,656 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-28 09:20:55,669 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,671 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,671 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,672 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,673 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,673 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,675 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,675 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,676 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,676 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,677 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,678 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,679 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,679 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-28 09:20:55,680 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,680 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,681 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,681 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,682 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,682 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,683 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,684 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,684 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,685 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 09:20:55,686 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,686 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-28 09:20:55,688 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 09:20:55,690 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-28 09:20:55,693 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-28 09:20:55,693 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-28 09:20:55,695 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-28 09:20:55,695 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-28 09:20:55,706 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-28 09:20:55,708 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-28 09:20:55,710 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-28 09:20:55,710 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-28 09:20:55,711 main DEBUG createAppenders(={Console}) 2024-11-28 09:20:55,711 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-28 09:20:55,712 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-28 09:20:55,712 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-28 09:20:55,712 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-28 09:20:55,713 main DEBUG OutputStream closed 2024-11-28 09:20:55,713 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-28 09:20:55,713 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-28 09:20:55,714 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-28 09:20:55,789 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-28 09:20:55,792 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-28 09:20:55,793 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-28 09:20:55,795 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-28 09:20:55,795 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-28 09:20:55,796 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-28 09:20:55,796 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-28 09:20:55,797 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-28 09:20:55,797 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-28 09:20:55,798 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-28 09:20:55,798 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-28 09:20:55,799 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-28 09:20:55,799 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-28 09:20:55,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-28 09:20:55,800 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-28 09:20:55,801 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-28 09:20:55,801 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-28 09:20:55,803 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-28 09:20:55,806 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-28 09:20:55,807 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-28 09:20:55,807 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-28 09:20:55,808 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-28T09:20:56,155 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c 2024-11-28 09:20:56,159 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-28 09:20:56,160 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
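[editor's note] The entries above trace Log4j 2 building the test logging configuration from the log4j2.properties bundled in hbase-logging-2.7.0-SNAPSHOT-tests.jar: a PatternLayout with pattern %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n, an appender named Console (HBase's HBaseTestAppender plugin) writing to SYSTEM_ERR, a root logger at INFO, and per-package level overrides. As a rough, non-authoritative sketch of the same shape using Log4j 2's public ConfigurationBuilder API -- with the standard Console appender standing in for the custom HBaseTestAppender, and only a few of the logger overrides shown:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.appender.ConsoleAppender;
import org.apache.logging.log4j.core.config.Configurator;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilder;
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory;
import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;

public final class TestLoggingSketch {
  public static void main(String[] args) {
    ConfigurationBuilder<BuiltConfiguration> b = ConfigurationBuilderFactory.newConfigurationBuilder();

    // Console appender on stderr with the pattern seen in the log
    // (the real test config builds an HBaseTestAppender instead of a plain Console appender).
    b.add(b.newAppender("Console", "Console")
        .addAttribute("target", ConsoleAppender.Target.SYSTEM_ERR)
        .add(b.newLayout("PatternLayout")
            .addAttribute("pattern", "%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n")));

    // Root at INFO routed to Console, plus a few of the per-package overrides traced above.
    b.add(b.newRootLogger(Level.INFO).add(b.newAppenderRef("Console")));
    b.add(b.newLogger("org.apache.hadoop", Level.WARN));
    b.add(b.newLogger("org.apache.hadoop.hbase", Level.DEBUG));
    b.add(b.newLogger("org.apache.zookeeper", Level.ERROR));

    Configurator.initialize(b.build());
  }
}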
2024-11-28T09:20:56,173 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-28T09:20:56,193 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-28T09:20:56,196 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de, deleteOnExit=true 2024-11-28T09:20:56,196 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-28T09:20:56,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/test.cache.data in system properties and HBase conf 2024-11-28T09:20:56,197 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T09:20:56,198 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.log.dir in system properties and HBase conf 2024-11-28T09:20:56,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T09:20:56,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T09:20:56,199 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-28T09:20:56,297 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-28T09:20:56,398 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-28T09:20:56,402 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T09:20:56,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T09:20:56,403 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T09:20:56,404 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T09:20:56,405 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T09:20:56,405 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T09:20:56,406 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T09:20:56,406 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T09:20:56,407 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T09:20:56,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/nfs.dump.dir in system properties and HBase conf 2024-11-28T09:20:56,408 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/java.io.tmpdir in system properties and HBase conf 2024-11-28T09:20:56,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T09:20:56,409 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-28T09:20:56,410 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T09:20:57,236 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-28T09:20:57,335 INFO [Time-limited test {}] log.Log(170): Logging initialized @2543ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-28T09:20:57,428 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T09:20:57,498 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T09:20:57,519 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T09:20:57,520 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T09:20:57,521 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T09:20:57,535 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T09:20:57,537 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.log.dir/,AVAILABLE} 2024-11-28T09:20:57,538 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T09:20:57,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/java.io.tmpdir/jetty-localhost-40871-hadoop-hdfs-3_4_1-tests_jar-_-any-18161728264184819466/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T09:20:57,755 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:40871} 2024-11-28T09:20:57,755 INFO [Time-limited test {}] server.Server(415): Started @2964ms 2024-11-28T09:20:58,144 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T09:20:58,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T09:20:58,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T09:20:58,154 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T09:20:58,154 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T09:20:58,155 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.log.dir/,AVAILABLE} 2024-11-28T09:20:58,156 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T09:20:58,274 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10ba49e9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/java.io.tmpdir/jetty-localhost-44941-hadoop-hdfs-3_4_1-tests_jar-_-any-12030774026478839781/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T09:20:58,275 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:44941} 2024-11-28T09:20:58,275 INFO [Time-limited test {}] server.Server(415): Started @3484ms 2024-11-28T09:20:58,332 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T09:20:58,835 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data2/current/BP-2046945961-172.17.0.2-1732785656987/current, will proceed with Du for space computation calculation, 2024-11-28T09:20:58,835 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data1/current/BP-2046945961-172.17.0.2-1732785656987/current, will proceed with Du for space computation calculation, 2024-11-28T09:20:58,878 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T09:20:58,934 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7ea453bee302d7a with lease ID 0x3693cc87335a07ca: Processing first storage report for DS-a0e51276-be01-43e9-8947-2c1814e5bc84 from datanode DatanodeRegistration(127.0.0.1:42869, datanodeUuid=e2f946b8-9461-43ef-9988-922692e43057, infoPort=44735, infoSecurePort=0, ipcPort=38507, storageInfo=lv=-57;cid=testClusterID;nsid=241278336;c=1732785656987) 2024-11-28T09:20:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7ea453bee302d7a with lease ID 0x3693cc87335a07ca: from storage DS-a0e51276-be01-43e9-8947-2c1814e5bc84 node DatanodeRegistration(127.0.0.1:42869, datanodeUuid=e2f946b8-9461-43ef-9988-922692e43057, infoPort=44735, infoSecurePort=0, ipcPort=38507, storageInfo=lv=-57;cid=testClusterID;nsid=241278336;c=1732785656987), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T09:20:58,936 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf7ea453bee302d7a with lease ID 0x3693cc87335a07ca: Processing first storage report for DS-a4fcb9c7-b947-4613-b9ee-8ca5b61b72cb from datanode DatanodeRegistration(127.0.0.1:42869, datanodeUuid=e2f946b8-9461-43ef-9988-922692e43057, infoPort=44735, infoSecurePort=0, ipcPort=38507, storageInfo=lv=-57;cid=testClusterID;nsid=241278336;c=1732785656987) 2024-11-28T09:20:58,937 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf7ea453bee302d7a with lease ID 0x3693cc87335a07ca: from storage DS-a4fcb9c7-b947-4613-b9ee-8ca5b61b72cb node DatanodeRegistration(127.0.0.1:42869, datanodeUuid=e2f946b8-9461-43ef-9988-922692e43057, infoPort=44735, infoSecurePort=0, ipcPort=38507, storageInfo=lv=-57;cid=testClusterID;nsid=241278336;c=1732785656987), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T09:20:58,989 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c 
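[editor's note] At this point HBaseTestingUtility has brought up the mini DFS for TestAcidGuaranteesWithBasicPolicy using the StartMiniClusterOption logged earlier (1 master, 1 region server, 1 data node, 1 ZooKeeper server). A minimal sketch of how a branch-2 test class typically drives this startup is below; the class name is illustrative and this is not the body of TestAcidGuaranteesWithBasicPolicy itself.

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.experimental.categories.Category;

@Category({ LargeTests.class }) // size category consistent with the 13-minute timeout in the log
public class MiniClusterStartupSketch {

  // HBaseClassTestRule derives the per-class timeout reported above ("timeout: 13 mins").
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(MiniClusterStartupSketch.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption printed by HBaseTestingUtility(1126):
    // 1 master, 1 region server, 1 data node, 1 ZooKeeper server.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build();
    TEST_UTIL.startMiniCluster(option);
  }

  @AfterClass
  public static void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();
  }
}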
2024-11-28T09:20:59,067 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/zookeeper_0, clientPort=53251, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-28T09:20:59,077 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=53251 2024-11-28T09:20:59,087 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:20:59,090 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:20:59,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741825_1001 (size=7) 2024-11-28T09:20:59,739 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 with version=8 2024-11-28T09:20:59,739 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/hbase-staging 2024-11-28T09:20:59,876 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-28T09:21:00,156 INFO [Time-limited test {}] client.ConnectionUtils(129): master/363d8d38a970:0 server-side Connection retries=45 2024-11-28T09:21:00,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,176 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,176 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T09:21:00,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,177 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T09:21:00,328 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T09:21:00,393 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-28T09:21:00,402 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-28T09:21:00,406 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T09:21:00,435 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 22124 (auto-detected) 2024-11-28T09:21:00,437 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-28T09:21:00,456 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:34825 2024-11-28T09:21:00,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:21:00,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:21:00,479 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:34825 connecting to ZooKeeper ensemble=127.0.0.1:53251 2024-11-28T09:21:00,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:348250x0, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T09:21:00,514 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34825-0x1003d6e958f0000 connected 2024-11-28T09:21:00,553 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T09:21:00,556 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T09:21:00,560 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T09:21:00,564 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34825 2024-11-28T09:21:00,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34825 2024-11-28T09:21:00,565 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34825 2024-11-28T09:21:00,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34825 2024-11-28T09:21:00,568 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34825 
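[editor's note] The RPC executor and ZooKeeper watcher entries above show the master process registering against the MiniZooKeeperCluster on client port 53251. Purely for orientation, a hypothetical client-side snippet pointing a standard HBase Connection at that same ensemble (in a test one would normally just call TEST_UTIL.getConnection() instead):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class MiniClusterClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port as reported by MiniZooKeeperCluster in this particular run.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 53251);

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      System.out.println("Cluster id: " + admin.getClusterMetrics().getClusterId());
    }
  }
}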
2024-11-28T09:21:00,575 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532, hbase.cluster.distributed=false 2024-11-28T09:21:00,638 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/363d8d38a970:0 server-side Connection retries=45 2024-11-28T09:21:00,638 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,639 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T09:21:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T09:21:00,639 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T09:21:00,641 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T09:21:00,643 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T09:21:00,644 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33819 2024-11-28T09:21:00,646 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T09:21:00,653 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-28T09:21:00,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:21:00,658 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:21:00,664 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33819 connecting to ZooKeeper ensemble=127.0.0.1:53251 2024-11-28T09:21:00,668 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:338190x0, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T09:21:00,669 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33819-0x1003d6e958f0001 connected 2024-11-28T09:21:00,669 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T09:21:00,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33819-0x1003d6e958f0001, 
quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T09:21:00,671 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T09:21:00,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33819 2024-11-28T09:21:00,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33819 2024-11-28T09:21:00,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33819 2024-11-28T09:21:00,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33819 2024-11-28T09:21:00,676 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33819 2024-11-28T09:21:00,678 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/363d8d38a970,34825,1732785659868 2024-11-28T09:21:00,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T09:21:00,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T09:21:00,686 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/363d8d38a970,34825,1732785659868 2024-11-28T09:21:00,694 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;363d8d38a970:34825 2024-11-28T09:21:00,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T09:21:00,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T09:21:00,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:00,707 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:00,708 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T09:21:00,709 INFO 
[master/363d8d38a970:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/363d8d38a970,34825,1732785659868 from backup master directory 2024-11-28T09:21:00,709 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T09:21:00,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T09:21:00,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/363d8d38a970,34825,1732785659868 2024-11-28T09:21:00,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T09:21:00,712 WARN [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T09:21:00,713 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=363d8d38a970,34825,1732785659868 2024-11-28T09:21:00,715 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-28T09:21:00,716 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-28T09:21:00,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741826_1002 (size=42) 2024-11-28T09:21:01,185 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/hbase.id with ID: 97388689-d327-4975-b2a4-0acff8c7d89e 2024-11-28T09:21:01,226 INFO [master/363d8d38a970:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T09:21:01,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:01,257 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:01,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741827_1003 (size=196) 2024-11-28T09:21:01,691 INFO [master/363d8d38a970:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:21:01,693 INFO [master/363d8d38a970:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-28T09:21:01,712 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] 
at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:01,717 INFO [master/363d8d38a970:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T09:21:01,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741828_1004 (size=1189) 2024-11-28T09:21:01,772 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store 2024-11-28T09:21:01,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741829_1005 (size=34) 2024-11-28T09:21:01,793 INFO [master/363d8d38a970:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 
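[editor's note] The 'master:store' table descriptor printed above (column families info, proc, rs and state) is the schema of the master's local region. As a hedged illustration only -- the real descriptor is assembled internally by MasterRegionFactory, not by test code -- the 'info' family's attributes map onto the public builder API roughly as follows:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // 'info' family as printed in the log: VERSIONS=3, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, IN_MEMORY=true, 8 KB block size.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build())
        // proc, rs and state use the defaults shown in the log (1 version, ROW bloom, 64 KB blocks).
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}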
2024-11-28T09:21:01,793 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:01,794 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T09:21:01,794 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:21:01,795 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:21:01,795 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T09:21:01,795 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:21:01,795 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:21:01,795 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T09:21:01,797 WARN [master/363d8d38a970:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/.initializing 2024-11-28T09:21:01,797 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/WALs/363d8d38a970,34825,1732785659868 2024-11-28T09:21:01,805 INFO [master/363d8d38a970:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T09:21:01,817 INFO [master/363d8d38a970:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=363d8d38a970%2C34825%2C1732785659868, suffix=, logDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/WALs/363d8d38a970,34825,1732785659868, archiveDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/oldWALs, maxLogs=10 2024-11-28T09:21:01,847 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/WALs/363d8d38a970,34825,1732785659868/363d8d38a970%2C34825%2C1732785659868.1732785661822, exclude list is [], retry=0 2024-11-28T09:21:01,867 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42869,DS-a0e51276-be01-43e9-8947-2c1814e5bc84,DISK] 2024-11-28T09:21:01,870 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-28T09:21:01,911 INFO [master/363d8d38a970:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/WALs/363d8d38a970,34825,1732785659868/363d8d38a970%2C34825%2C1732785659868.1732785661822 2024-11-28T09:21:01,912 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44735:44735)] 2024-11-28T09:21:01,913 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:01,913 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:01,918 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:01,919 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:01,964 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:01,988 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-28T09:21:01,992 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:01,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:01,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:01,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-28T09:21:02,000 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:02,001 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:02,002 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:02,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-28T09:21:02,005 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:02,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:02,006 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:02,009 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-28T09:21:02,009 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:02,010 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:02,013 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:02,014 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:02,023 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-28T09:21:02,027 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T09:21:02,031 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:21:02,032 INFO [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70937045, jitterRate=0.057044342160224915}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T09:21:02,036 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T09:21:02,037 INFO [master/363d8d38a970:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-28T09:21:02,066 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17a07dc1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:02,103 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
2024-11-28T09:21:02,115 INFO [master/363d8d38a970:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-28T09:21:02,115 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-28T09:21:02,117 INFO [master/363d8d38a970:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-28T09:21:02,119 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-28T09:21:02,124 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 5 msec 2024-11-28T09:21:02,125 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-28T09:21:02,157 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-28T09:21:02,174 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-28T09:21:02,176 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-28T09:21:02,179 INFO [master/363d8d38a970:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-28T09:21:02,180 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-28T09:21:02,181 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-28T09:21:02,184 INFO [master/363d8d38a970:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-28T09:21:02,188 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-28T09:21:02,189 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-28T09:21:02,190 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-28T09:21:02,192 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-28T09:21:02,203 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-28T09:21:02,204 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-28T09:21:02,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T09:21:02,208 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T09:21:02,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,209 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,209 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=363d8d38a970,34825,1732785659868, sessionid=0x1003d6e958f0000, setting cluster-up flag (Was=false) 2024-11-28T09:21:02,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,222 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,227 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-28T09:21:02,229 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=363d8d38a970,34825,1732785659868 2024-11-28T09:21:02,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,234 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:02,240 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-28T09:21:02,242 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=363d8d38a970,34825,1732785659868 2024-11-28T09:21:02,292 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;363d8d38a970:33819 2024-11-28T09:21:02,294 INFO 
[RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1008): ClusterId : 97388689-d327-4975-b2a4-0acff8c7d89e 2024-11-28T09:21:02,297 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T09:21:02,303 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T09:21:02,304 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T09:21:02,308 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T09:21:02,308 DEBUG [RS:0;363d8d38a970:33819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c0afa4b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:02,311 DEBUG [RS:0;363d8d38a970:33819 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@612c960b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=363d8d38a970/172.17.0.2:0 2024-11-28T09:21:02,314 INFO [RS:0;363d8d38a970:33819 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-28T09:21:02,314 INFO [RS:0;363d8d38a970:33819 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-28T09:21:02,315 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-28T09:21:02,317 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(3073): reportForDuty to master=363d8d38a970,34825,1732785659868 with isa=363d8d38a970/172.17.0.2:33819, startcode=1732785660637 2024-11-28T09:21:02,332 DEBUG [RS:0;363d8d38a970:33819 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T09:21:02,332 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-28T09:21:02,340 INFO [master/363d8d38a970:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-28T09:21:02,344 INFO [master/363d8d38a970:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-28T09:21:02,352 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 363d8d38a970,34825,1732785659868 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-28T09:21:02,357 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/363d8d38a970:0, corePoolSize=5, maxPoolSize=5 2024-11-28T09:21:02,357 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/363d8d38a970:0, corePoolSize=5, maxPoolSize=5 2024-11-28T09:21:02,357 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/363d8d38a970:0, corePoolSize=5, maxPoolSize=5 2024-11-28T09:21:02,357 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/363d8d38a970:0, corePoolSize=5, maxPoolSize=5 2024-11-28T09:21:02,357 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/363d8d38a970:0, corePoolSize=10, maxPoolSize=10 2024-11-28T09:21:02,358 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,358 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/363d8d38a970:0, corePoolSize=2, maxPoolSize=2 2024-11-28T09:21:02,358 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,366 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732785692365 2024-11-28T09:21:02,368 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-28T09:21:02,369 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-28T09:21:02,369 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-28T09:21:02,369 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-28T09:21:02,374 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-28T09:21:02,374 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:02,374 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize 
cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-28T09:21:02,374 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-28T09:21:02,375 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-28T09:21:02,374 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T09:21:02,376 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
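Annotation: the FSTableDescriptors record above prints the column-family settings used for hbase:meta (VERSIONS 3, ROW_INDEX_V1 data-block encoding, ROWCOL bloom filter, IN_MEMORY, 8 KB block size). A minimal sketch, using the public HBase 2.x builder API, of declaring an equivalent family on an ordinary user table; the table name "example" and class name are made up for illustration:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptor {
      // Mirrors the 'info' family settings printed in the log for hbase:meta.
      public static TableDescriptor build() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build())
            .build();
      }
    }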
2024-11-28T09:21:02,378 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2
2024-11-28T09:21:02,380 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner
2024-11-28T09:21:02,380 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner
2024-11-28T09:21:02,380 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37735, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService
2024-11-28T09:21:02,383 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner
2024-11-28T09:21:02,383 INFO [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner
2024-11-28T09:21:02,388 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.large.0-1732785662384,5,FailOnTimeoutGroup]
2024-11-28T09:21:02,388 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34825 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet
    at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:02,390 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.small.0-1732785662389,5,FailOnTimeoutGroup]
2024-11-28T09:21:02,390 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled.
2024-11-28T09:21:02,390 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it.
2024-11-28T09:21:02,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741831_1007 (size=1039)
2024-11-28T09:21:02,392 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled.
2024-11-28T09:21:02,392 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,421 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-28T09:21:02,421 WARN [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-28T09:21:02,522 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(3073): reportForDuty to master=363d8d38a970,34825,1732785659868 with isa=363d8d38a970/172.17.0.2:33819, startcode=1732785660637 2024-11-28T09:21:02,525 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34825 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,527 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34825 {}] master.ServerManager(486): Registering regionserver=363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,537 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:02,537 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:33549 2024-11-28T09:21:02,537 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-28T09:21:02,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T09:21:02,542 DEBUG [RS:0;363d8d38a970:33819 {}] zookeeper.ZKUtil(111): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,542 WARN [RS:0;363d8d38a970:33819 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
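Annotation: by this point the region server's second reportForDuty has succeeded and ServerManager has registered 363d8d38a970,33819,1732785660637. A minimal sketch, assuming the public Admin API and a client pointed at this test cluster's ZooKeeper quorum (127.0.0.1:53251, taken from the log), of inspecting the registered servers; the class name is illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ShowLiveServers {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");          // test-specific values
        conf.set("hbase.zookeeper.property.clientPort", "53251"); // taken from the log above
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("active master: " + metrics.getMasterName());
          for (ServerName sn : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("live region server: " + sn);
          }
        }
      }
    }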
2024-11-28T09:21:02,542 INFO [RS:0;363d8d38a970:33819 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T09:21:02,543 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,544 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [363d8d38a970,33819,1732785660637] 2024-11-28T09:21:02,555 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-28T09:21:02,566 INFO [RS:0;363d8d38a970:33819 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T09:21:02,578 INFO [RS:0;363d8d38a970:33819 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T09:21:02,580 INFO [RS:0;363d8d38a970:33819 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T09:21:02,581 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,581 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-28T09:21:02,588 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
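Annotation: the WALFactory record above shows the region server instantiating AsyncFSWALProvider, the fan-out WAL implementation also used for the master-local store earlier in this log. A minimal sketch (the property name hbase.wal.provider and the value "asyncfs" are the standard ones, but verify against the version in use; HBaseTestingUtility is a test-scope helper, as in the run captured here) of pinning the provider when starting a mini-cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HBaseTestingUtility;

    public class MiniClusterWithAsyncWal {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // "asyncfs" selects AsyncFSWALProvider, the provider seen in this log;
        // "filesystem" would select the classic FSHLog-based provider instead.
        conf.set("hbase.wal.provider", "asyncfs");
        HBaseTestingUtility util = new HBaseTestingUtility(conf);
        util.startMiniCluster();   // starts a local master + region server, as in this log
        try {
          System.out.println("mini cluster started");
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }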
2024-11-28T09:21:02,588 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,588 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/363d8d38a970:0, corePoolSize=2, maxPoolSize=2 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,589 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,590 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,590 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/363d8d38a970:0, corePoolSize=1, maxPoolSize=1 2024-11-28T09:21:02,590 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/363d8d38a970:0, corePoolSize=3, maxPoolSize=3 2024-11-28T09:21:02,590 DEBUG [RS:0;363d8d38a970:33819 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0, corePoolSize=3, maxPoolSize=3 2024-11-28T09:21:02,591 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,591 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,591 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,591 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,591 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,33819,1732785660637-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 
2024-11-28T09:21:02,611 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T09:21:02,613 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,33819,1732785660637-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:02,632 INFO [RS:0;363d8d38a970:33819 {}] regionserver.Replication(204): 363d8d38a970,33819,1732785660637 started 2024-11-28T09:21:02,632 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1767): Serving as 363d8d38a970,33819,1732785660637, RpcServer on 363d8d38a970/172.17.0.2:33819, sessionid=0x1003d6e958f0001 2024-11-28T09:21:02,632 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T09:21:02,632 DEBUG [RS:0;363d8d38a970:33819 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,633 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '363d8d38a970,33819,1732785660637' 2024-11-28T09:21:02,633 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T09:21:02,634 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T09:21:02,634 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T09:21:02,634 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T09:21:02,634 DEBUG [RS:0;363d8d38a970:33819 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 363d8d38a970,33819,1732785660637 2024-11-28T09:21:02,634 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '363d8d38a970,33819,1732785660637' 2024-11-28T09:21:02,635 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T09:21:02,635 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T09:21:02,636 DEBUG [RS:0;363d8d38a970:33819 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T09:21:02,636 INFO [RS:0;363d8d38a970:33819 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T09:21:02,636 INFO [RS:0;363d8d38a970:33819 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
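Annotation: the region server is now serving and its ephemeral znode sits under /hbase/rs (see the RegionServerTracker record above). A minimal sketch, using the plain ZooKeeper client against the quorum address from the log, of listing those membership znodes; note that most znodes under /hbase carry protobuf-encoded payloads, so this is only an existence/membership check:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ListRegionServerZnodes {
      public static void main(String[] args) throws Exception {
        // Quorum address taken from the log; specific to this test run.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:53251", 30000, new Watcher() {
          @Override public void process(WatchedEvent event) { /* no-op watcher */ }
        });
        try {
          for (String child : zk.getChildren("/hbase/rs", false)) {
            System.out.println("registered region server znode: " + child);
          }
        } finally {
          zk.close();
        }
      }
    }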
2024-11-28T09:21:02,741 INFO [RS:0;363d8d38a970:33819 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T09:21:02,745 INFO [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=363d8d38a970%2C33819%2C1732785660637, suffix=, logDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637, archiveDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/oldWALs, maxLogs=32 2024-11-28T09:21:02,763 DEBUG [RS:0;363d8d38a970:33819 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637/363d8d38a970%2C33819%2C1732785660637.1732785662747, exclude list is [], retry=0 2024-11-28T09:21:02,767 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42869,DS-a0e51276-be01-43e9-8947-2c1814e5bc84,DISK] 2024-11-28T09:21:02,771 INFO [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637/363d8d38a970%2C33819%2C1732785660637.1732785662747 2024-11-28T09:21:02,771 DEBUG [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:44735:44735)] 2024-11-28T09:21:02,795 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-28T09:21:02,795 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:02,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741833_1009 (size=32) 2024-11-28T09:21:03,208 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): 
Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:03,211 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T09:21:03,214 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T09:21:03,214 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,215 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T09:21:03,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T09:21:03,218 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T09:21:03,221 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T09:21:03,221 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740 2024-11-28T09:21:03,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740 2024-11-28T09:21:03,227 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-28T09:21:03,230 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-28T09:21:03,234 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:21:03,235 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72427077, jitterRate=0.07924754917621613}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:21:03,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-28T09:21:03,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-28T09:21:03,239 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-28T09:21:03,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-28T09:21:03,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T09:21:03,239 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T09:21:03,240 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-28T09:21:03,241 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-28T09:21:03,243 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-28T09:21:03,243 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-28T09:21:03,249 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-28T09:21:03,257 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T09:21:03,259 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-28T09:21:03,411 DEBUG [363d8d38a970:34825 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-28T09:21:03,415 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:03,420 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 363d8d38a970,33819,1732785660637, state=OPENING 2024-11-28T09:21:03,426 DEBUG [PEWorker-5 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-28T09:21:03,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:03,428 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:03,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T09:21:03,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T09:21:03,431 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:03,605 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:03,607 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T09:21:03,610 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58876, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T09:21:03,621 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-28T09:21:03,621 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T09:21:03,622 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-28T09:21:03,625 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=363d8d38a970%2C33819%2C1732785660637.meta, suffix=.meta, logDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637, archiveDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/oldWALs, maxLogs=32 2024-11-28T09:21:03,642 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637/363d8d38a970%2C33819%2C1732785660637.meta.1732785663627.meta, exclude list is [], retry=0 2024-11-28T09:21:03,646 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:42869,DS-a0e51276-be01-43e9-8947-2c1814e5bc84,DISK] 2024-11-28T09:21:03,650 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/WALs/363d8d38a970,33819,1732785660637/363d8d38a970%2C33819%2C1732785660637.meta.1732785663627.meta 2024-11-28T09:21:03,650 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with 
pipeline: [(127.0.0.1/127.0.0.1:44735:44735)] 2024-11-28T09:21:03,651 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:03,652 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-28T09:21:03,718 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-28T09:21:03,723 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-28T09:21:03,728 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-28T09:21:03,728 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:03,728 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-28T09:21:03,729 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-28T09:21:03,732 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T09:21:03,734 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T09:21:03,734 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,736 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T09:21:03,737 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T09:21:03,738 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,738 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T09:21:03,740 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T09:21:03,740 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,741 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T09:21:03,743 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740 2024-11-28T09:21:03,746 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740 2024-11-28T09:21:03,748 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:21:03,751 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-28T09:21:03,753 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71195265, jitterRate=0.060892120003700256}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:21:03,754 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-28T09:21:03,761 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732785663599 2024-11-28T09:21:03,773 DEBUG [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-28T09:21:03,773 INFO [RS_OPEN_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-28T09:21:03,774 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:03,776 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 363d8d38a970,33819,1732785660637, state=OPEN 2024-11-28T09:21:03,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T09:21:03,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T09:21:03,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T09:21:03,781 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T09:21:03,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-28T09:21:03,786 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=363d8d38a970,33819,1732785660637 in 350 msec 2024-11-28T09:21:03,791 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-28T09:21:03,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 539 msec 2024-11-28T09:21:03,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.5180 sec 2024-11-28T09:21:03,797 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732785663797, completionTime=-1 2024-11-28T09:21:03,797 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-28T09:21:03,797 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-28T09:21:03,840 DEBUG [hconnection-0x4ff0f410-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:03,843 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:03,854 INFO [master/363d8d38a970:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-28T09:21:03,854 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732785723854 2024-11-28T09:21:03,854 INFO [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732785783854 2024-11-28T09:21:03,855 INFO [master/363d8d38a970:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 57 msec 2024-11-28T09:21:03,880 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:03,880 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:03,880 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:03,882 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-363d8d38a970:34825, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:03,883 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-28T09:21:03,887 DEBUG [master/363d8d38a970:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-28T09:21:03,892 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-28T09:21:03,893 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T09:21:03,900 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-28T09:21:03,903 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:21:03,904 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:03,906 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:21:03,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741835_1011 (size=358) 2024-11-28T09:21:04,322 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 4cf15397e80ca5505a26ba84c5dd3d7f, NAME => 'hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:04,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741836_1012 (size=42) 2024-11-28T09:21:04,733 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:04,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 4cf15397e80ca5505a26ba84c5dd3d7f, disabling compactions & flushes 2024-11-28T09:21:04,734 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:04,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:04,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 
after waiting 0 ms 2024-11-28T09:21:04,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:04,734 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:04,734 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 4cf15397e80ca5505a26ba84c5dd3d7f: 2024-11-28T09:21:04,736 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:21:04,743 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1732785664737"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785664737"}]},"ts":"1732785664737"} 2024-11-28T09:21:04,767 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T09:21:04,769 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:21:04,772 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785664769"}]},"ts":"1732785664769"} 2024-11-28T09:21:04,776 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-28T09:21:04,783 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4cf15397e80ca5505a26ba84c5dd3d7f, ASSIGN}] 2024-11-28T09:21:04,785 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=4cf15397e80ca5505a26ba84c5dd3d7f, ASSIGN 2024-11-28T09:21:04,787 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=4cf15397e80ca5505a26ba84c5dd3d7f, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:21:04,937 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4cf15397e80ca5505a26ba84c5dd3d7f, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:04,941 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 4cf15397e80ca5505a26ba84c5dd3d7f, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:05,094 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:05,100 INFO [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open 
hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:05,101 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 4cf15397e80ca5505a26ba84c5dd3d7f, NAME => 'hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:05,101 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,101 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:05,102 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,102 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,104 INFO [StoreOpener-4cf15397e80ca5505a26ba84c5dd3d7f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,106 INFO [StoreOpener-4cf15397e80ca5505a26ba84c5dd3d7f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4cf15397e80ca5505a26ba84c5dd3d7f columnFamilyName info 2024-11-28T09:21:05,106 DEBUG [StoreOpener-4cf15397e80ca5505a26ba84c5dd3d7f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:05,107 INFO [StoreOpener-4cf15397e80ca5505a26ba84c5dd3d7f-1 {}] regionserver.HStore(327): Store=4cf15397e80ca5505a26ba84c5dd3d7f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:05,108 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,109 DEBUG 
[RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,113 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:21:05,116 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:21:05,117 INFO [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 4cf15397e80ca5505a26ba84c5dd3d7f; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72379941, jitterRate=0.07854516804218292}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T09:21:05,118 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 4cf15397e80ca5505a26ba84c5dd3d7f: 2024-11-28T09:21:05,121 INFO [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f., pid=6, masterSystemTime=1732785665094 2024-11-28T09:21:05,124 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:21:05,124 INFO [RS_OPEN_PRIORITY_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 
2024-11-28T09:21:05,125 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=4cf15397e80ca5505a26ba84c5dd3d7f, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:05,132 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-28T09:21:05,133 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 4cf15397e80ca5505a26ba84c5dd3d7f, server=363d8d38a970,33819,1732785660637 in 187 msec 2024-11-28T09:21:05,135 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-28T09:21:05,136 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=4cf15397e80ca5505a26ba84c5dd3d7f, ASSIGN in 350 msec 2024-11-28T09:21:05,137 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:21:05,137 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785665137"}]},"ts":"1732785665137"} 2024-11-28T09:21:05,140 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-28T09:21:05,144 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:21:05,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.2500 sec 2024-11-28T09:21:05,213 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-28T09:21:05,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-28T09:21:05,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:05,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:21:05,243 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-28T09:21:05,258 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-28T09:21:05,263 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 23 msec 2024-11-28T09:21:05,266 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-28T09:21:05,277 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-28T09:21:05,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 15 msec 2024-11-28T09:21:05,292 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-28T09:21:05,295 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-28T09:21:05,295 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.582sec 2024-11-28T09:21:05,296 INFO [master/363d8d38a970:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-28T09:21:05,298 INFO [master/363d8d38a970:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-28T09:21:05,299 INFO [master/363d8d38a970:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-28T09:21:05,299 INFO [master/363d8d38a970:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-28T09:21:05,299 INFO [master/363d8d38a970:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-28T09:21:05,300 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T09:21:05,301 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-28T09:21:05,307 DEBUG [master/363d8d38a970:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-28T09:21:05,308 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-28T09:21:05,308 INFO [master/363d8d38a970:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=363d8d38a970,34825,1732785659868-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
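
[Editor's note] The entries above show the master bootstrapping the built-in "default" and "hbase" namespaces through CreateNamespaceProcedure (pid=7 and pid=8). For reference, a client can trigger the same procedure for its own namespace through the Admin API. This is only a sketch: the namespace name "test_ns" is made up, and the connection is assumed to come from whatever hbase-site.xml is on the classpath.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CreateNamespaceSketch {
  public static void main(String[] args) throws Exception {
    // Connects using the cluster settings found on the classpath.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Runs a CreateNamespaceProcedure on the master, analogous to the
      // "namespace=default" and "namespace=hbase" procedures logged above.
      admin.createNamespace(NamespaceDescriptor.create("test_ns").build());
    }
  }
}
```
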
2024-11-28T09:21:05,396 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3771e354 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@38630296 2024-11-28T09:21:05,397 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-28T09:21:05,403 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6321da62, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:05,407 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-28T09:21:05,407 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-28T09:21:05,416 DEBUG [hconnection-0x13adb0ff-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:05,424 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58900, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:05,433 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=363d8d38a970,34825,1732785659868 2024-11-28T09:21:05,449 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=300, ProcessCount=11, AvailableMemoryMB=5445 2024-11-28T09:21:05,461 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:21:05,465 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57194, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:21:05,473 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
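
[Editor's note] The WARN from client.ZKConnectionRegistry above says the ZooKeeper-based client registry is deprecated and points at the RPC connection registry section of the reference guide. Below is a minimal sketch of switching a client to that registry. It assumes an HBase 2.5+ client; the property keys, the registry class name, and the bootstrap host list are written from memory / for illustration and should be checked against the linked book section.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RpcRegistrySketch {
  public static Connection connect() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Locate the cluster via server RPC endpoints instead of the deprecated
    // ZooKeeper-based registry that produced the WARN above.
    // Property names assume HBase 2.5+; verify against the referenced book chapter.
    conf.set("hbase.client.registry.impl",
        "org.apache.hadoop.hbase.client.RpcConnectionRegistry");
    conf.set("hbase.client.bootstrap.servers",
        "rs-host-1:16020,rs-host-2:16020"); // illustrative endpoints
    return ConnectionFactory.createConnection(conf);
  }
}
```
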
2024-11-28T09:21:05,477 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:21:05,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:05,482 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:21:05,483 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:05,484 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:21:05,485 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-28T09:21:05,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T09:21:05,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741837_1013 (size=960) 2024-11-28T09:21:05,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T09:21:05,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T09:21:05,898 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:05,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741838_1014 (size=53) 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 9324112e51bee406916a385aca28ddff, disabling compactions & flushes 2024-11-28T09:21:05,909 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. after waiting 0 ms 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:05,909 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:05,909 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:05,911 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:21:05,912 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785665912"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785665912"}]},"ts":"1732785665912"} 2024-11-28T09:21:05,915 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
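
[Editor's note] The create request logged above builds 'TestAcidGuarantees' with families A, B and C, VERSIONS => '1', and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC', which is why the stores later come up with CompactingMemStore. A client-side sketch of an equivalent descriptor follows. Table and family names are taken from the log; whether the actual test also sets in-memory compaction per family is not shown here, so the per-family call is included only as the equivalent knob, and all other defaults are left alone.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder tdb = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Same table-level attribute the log shows under TABLE_ATTRIBUTES => METADATA.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      for (String family : new String[] {"A", "B", "C"}) {
        tdb.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC) // per-family equivalent of the attribute above
            .build());
      }
      // Kicks off a CreateTableProcedure like pid=9 in this run.
      admin.createTable(tdb.build());
    }
  }
}
```
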
2024-11-28T09:21:05,917 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:21:05,917 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785665917"}]},"ts":"1732785665917"} 2024-11-28T09:21:05,920 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:21:05,924 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, ASSIGN}] 2024-11-28T09:21:05,925 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, ASSIGN 2024-11-28T09:21:05,927 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:21:06,078 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9324112e51bee406916a385aca28ddff, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:06,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:06,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T09:21:06,239 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:06,245 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:06,245 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:06,246 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,246 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:06,246 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,246 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,248 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,251 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:06,251 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9324112e51bee406916a385aca28ddff columnFamilyName A 2024-11-28T09:21:06,251 DEBUG [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:06,252 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(327): Store=9324112e51bee406916a385aca28ddff/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:06,253 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,254 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:06,255 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9324112e51bee406916a385aca28ddff columnFamilyName B 2024-11-28T09:21:06,255 DEBUG [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:06,256 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(327): Store=9324112e51bee406916a385aca28ddff/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:06,256 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,257 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:06,258 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9324112e51bee406916a385aca28ddff columnFamilyName C 2024-11-28T09:21:06,258 DEBUG [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:06,258 INFO [StoreOpener-9324112e51bee406916a385aca28ddff-1 {}] regionserver.HStore(327): Store=9324112e51bee406916a385aca28ddff/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:06,259 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:06,260 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,261 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,264 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:21:06,266 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,270 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:21:06,271 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 9324112e51bee406916a385aca28ddff; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71850088, jitterRate=0.07064974308013916}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:21:06,272 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:06,274 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., pid=11, masterSystemTime=1732785666238 2024-11-28T09:21:06,277 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:06,277 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
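
[Editor's note] With the region open (CompactingMemStore for A/B/C and FlushNonSloppyStoresFirstPolicy), the workload starts writing into those stores; the MemStoreFlusher entries further down show the resulting memstore pressure. As a point of reference, a single Put that touches all three families is applied atomically within its row, which is the property an acid-guarantees style test checks. This sketch only illustrates that kind of write; the row key, qualifier, and values are made up, and the real test writers are not shown in this log.

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0")); // illustrative row key
      // One mutation spanning the three families from the log; HBase applies it
      // atomically within the row, so a reader never sees a partially written row.
      for (String family : new String[] {"A", "B", "C"}) {
        put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col0"), Bytes.toBytes("value"));
      }
      table.put(put);
    }
  }
}
```
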
2024-11-28T09:21:06,278 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=9324112e51bee406916a385aca28ddff, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:06,285 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-28T09:21:06,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 in 196 msec 2024-11-28T09:21:06,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-28T09:21:06,288 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, ASSIGN in 361 msec 2024-11-28T09:21:06,289 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:21:06,290 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785666290"}]},"ts":"1732785666290"} 2024-11-28T09:21:06,292 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:21:06,295 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:21:06,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 818 msec 2024-11-28T09:21:06,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-28T09:21:06,605 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-28T09:21:06,611 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x63607639 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2e67f019 2024-11-28T09:21:06,616 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6fcb5f29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,618 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,620 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,623 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:21:06,625 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57202, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:21:06,633 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53b8a93e to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5095ba91 2024-11-28T09:21:06,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f2091cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,638 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6f343a4d to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@12885408 2024-11-28T09:21:06,641 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22cb07dd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,642 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1324ee83 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@62c43377 2024-11-28T09:21:06,645 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18cb251d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,646 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x478bae6b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4977266 2024-11-28T09:21:06,649 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5400112e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,651 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x048068a5 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a8f4734 2024-11-28T09:21:06,654 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e52b42a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,656 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b5f27aa to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10c964e8 2024-11-28T09:21:06,662 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9ed28bb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,663 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x295cb1ac to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72e97e4b 2024-11-28T09:21:06,671 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12a1285d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,672 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x490457fd to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@527c6d40 2024-11-28T09:21:06,675 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@353bc462, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,677 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d2a8e08 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c8de680 2024-11-28T09:21:06,679 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47fe2fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:06,685 DEBUG [hconnection-0x7c0c677-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,685 DEBUG [hconnection-0x72d84b08-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:06,694 DEBUG [hconnection-0x1d7ad05f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,695 DEBUG [hconnection-0xaeae253-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,695 DEBUG [hconnection-0x2735bed9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,697 DEBUG [hconnection-0x71942b95-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-28T09:21:06,699 DEBUG [hconnection-0x4d0fdb91-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE 
authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,699 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:06,700 DEBUG [hconnection-0x4f2a833-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,700 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58922, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,701 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58910, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,701 DEBUG [hconnection-0x76a5e6ba-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:06,702 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:06,703 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58928, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,703 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58932, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T09:21:06,704 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:06,705 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58948, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,705 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,711 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,714 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,725 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58968, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:06,764 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:06,773 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:06,774 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:06,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:06,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:06,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:06,775 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:06,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:06,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T09:21:06,869 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:06,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:06,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:06,880 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:06,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:06,881 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
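[Editor's note] The flush activity above is driven by the client-side request logged as "Client=jenkins//172.17.0.2 flush TestAcidGuarantees": the master stores it as FlushTableProcedure pid=12 and spawns a FlushRegionProcedure subprocedure (pid=13) per region. As a minimal sketch only (not the test's actual code), a client would typically trigger such a flush through the Admin API; the ZooKeeper quorum/port below mirror the connect string in this log (127.0.0.1:53251), and everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Values taken from the ZooKeeper connect string seen in this log.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.set("hbase.zookeeper.property.clientPort", "53251");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush request to the master, which runs it as a
      // flush-table procedure with one flush-region subprocedure per region
      // (pid=12 / pid=13 in the log above).
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}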
2024-11-28T09:21:06,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:06,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:06,943 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7e1ea91b04fe4819a0bfc90a69f46818 is 50, key is test_row_0/A:col10/1732785666756/Put/seqid=0 2024-11-28T09:21:06,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741839_1015 (size=14341) 2024-11-28T09:21:06,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7e1ea91b04fe4819a0bfc90a69f46818 2024-11-28T09:21:07,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T09:21:07,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,046 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785726958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785726970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,048 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:07,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785727048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
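[Editor's note] The RegionTooBusyException warnings above come from HRegion.checkResources: once the region's memstore reaches its blocking limit (512.0 K here), new mutations are rejected until the in-progress flush frees space. From the client's side these are retriable errors handled by the HBase client's built-in retry policy. The sketch below is illustrative only and uses standard client configuration keys with example values; the row, family, and qualifier names echo those appearing in this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWithRetries {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 15);          // max retries per operation (example value)
    conf.setLong("hbase.client.pause", 100);                  // base backoff in ms (example value)
    conf.setLong("hbase.client.operation.timeout", 60_000);   // overall per-operation deadline in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region answers with RegionTooBusyException (memstore over its
      // blocking limit), the client retries with backoff until the flush
      // completes or the operation deadline is exceeded.
      table.put(put);
    }
  }
}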
2024-11-28T09:21:07,060 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785727054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785727054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a967d15758314ec1bfef65c494186807 is 50, key is test_row_0/B:col10/1732785666756/Put/seqid=0 2024-11-28T09:21:07,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741840_1016 (size=12001) 2024-11-28T09:21:07,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785727153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785727166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,172 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785727167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785727168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785727169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,208 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:07,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:07,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,211 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T09:21:07,366 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785727363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,367 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:07,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:07,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,367 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
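[Editor's note] The repeated pid=13 failures above are expected while MemStoreFlusher.0 is still writing the region's stores to disk: FlushRegionCallable sees the region "already flushing", throws IOException, and the master re-dispatches the remote procedure until the in-flight flush finishes. The 512.0 K blocking threshold is, in standard HBase configuration, the per-region memstore flush size multiplied by the block multiplier. The sketch below only illustrates those two settings with example values (128 KB x 4 = 512 KB); it is an assumption about how this test arrived at that limit, not a statement of its actual configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Trigger a flush once the per-region memstore reaches 128 KB (example value).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new writes (RegionTooBusyException) at 4x the flush size, i.e. 512 KB.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("Writes block at roughly " + blockingLimit + " bytes per region");
  }
}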
2024-11-28T09:21:07,368 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,379 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785727376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,381 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785727376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,385 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785727378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785727380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a967d15758314ec1bfef65c494186807 2024-11-28T09:21:07,523 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,524 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:07,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:07,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,525 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:07,561 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/937cea3554b44cf1a054e114e354c292 is 50, key is test_row_0/C:col10/1732785666756/Put/seqid=0 2024-11-28T09:21:07,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741841_1017 (size=12001) 2024-11-28T09:21:07,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/937cea3554b44cf1a054e114e354c292 2024-11-28T09:21:07,597 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7e1ea91b04fe4819a0bfc90a69f46818 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818 2024-11-28T09:21:07,610 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818, entries=200, sequenceid=12, filesize=14.0 K 2024-11-28T09:21:07,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a967d15758314ec1bfef65c494186807 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807 2024-11-28T09:21:07,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:21:07,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/937cea3554b44cf1a054e114e354c292 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292 2024-11-28T09:21:07,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:21:07,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9324112e51bee406916a385aca28ddff in 892ms, sequenceid=12, compaction requested=false 2024-11-28T09:21:07,658 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-28T09:21:07,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:07,680 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,681 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-28T09:21:07,681 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:07,682 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:21:07,685 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
as already flushing 2024-11-28T09:21:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:07,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:07,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:07,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:07,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/a9882ac20a764f4ba222c6a9499bf8c7 is 50, key is test_row_0/A:col10/1732785666986/Put/seqid=0 2024-11-28T09:21:07,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785727706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785727706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,742 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785727711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,748 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785727740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,750 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785727741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741842_1018 (size=14341) 2024-11-28T09:21:07,768 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/a9882ac20a764f4ba222c6a9499bf8c7 2024-11-28T09:21:07,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/92499738def54736ab192558e2fcfefe is 50, key is test_row_0/B:col10/1732785666986/Put/seqid=0 2024-11-28T09:21:07,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-28T09:21:07,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741843_1019 (size=12001) 2024-11-28T09:21:07,836 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/92499738def54736ab192558e2fcfefe 2024-11-28T09:21:07,856 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785727854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,857 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785727855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785727855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,860 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785727854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:07,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785727859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:07,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5fd300af94c34569b4cfe84a5f73bc47 is 50, key is test_row_0/C:col10/1732785666986/Put/seqid=0 2024-11-28T09:21:07,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741844_1020 (size=12001) 2024-11-28T09:21:08,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785728062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,070 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785728062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785728064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,078 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785728064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,078 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785728065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,298 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5fd300af94c34569b4cfe84a5f73bc47 2024-11-28T09:21:08,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/a9882ac20a764f4ba222c6a9499bf8c7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7 2024-11-28T09:21:08,331 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7, entries=200, sequenceid=39, filesize=14.0 K 2024-11-28T09:21:08,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/92499738def54736ab192558e2fcfefe as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe 2024-11-28T09:21:08,366 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T09:21:08,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5fd300af94c34569b4cfe84a5f73bc47 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47 2024-11-28T09:21:08,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785728374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,387 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785728376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,388 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T09:21:08,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:08,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785728381, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:08,391 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=46.96 KB/48090 for 9324112e51bee406916a385aca28ddff in 709ms, sequenceid=39, compaction requested=false 2024-11-28T09:21:08,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:08,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:08,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:08,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-28T09:21:08,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-28T09:21:08,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:08,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:08,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:08,393 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:08,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:08,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:08,394 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:08,401 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-28T09:21:08,401 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6900 sec 2024-11-28T09:21:08,407 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.7090 sec 2024-11-28T09:21:08,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/fe05bbe89ff64b239bff72b0430fd8bd is 50, key is test_row_0/A:col10/1732785668390/Put/seqid=0 2024-11-28T09:21:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741845_1021 (size=14341) 2024-11-28T09:21:08,453 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/fe05bbe89ff64b239bff72b0430fd8bd 2024-11-28T09:21:08,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ee62ac3905b74c81829271c17a5753c7 is 50, key is test_row_0/B:col10/1732785668390/Put/seqid=0 2024-11-28T09:21:08,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741846_1022 (size=12001) 2024-11-28T09:21:08,494 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ee62ac3905b74c81829271c17a5753c7 2024-11-28T09:21:08,528 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/7563fe8c408f442a94d8bd33622ec3ba is 50, key is test_row_0/C:col10/1732785668390/Put/seqid=0 2024-11-28T09:21:08,560 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-28T09:21:08,561 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-28T09:21:08,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741847_1023 (size=12001) 2024-11-28T09:21:08,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785728555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,571 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785728568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785728674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785728674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12
2024-11-28T09:21:08,814 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed
2024-11-28T09:21:08,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T09:21:08,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees
2024-11-28T09:21:08,823 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T09:21:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-28T09:21:08,824 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T09:21:08,825 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-28T09:21:08,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785728882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785728882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785728889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785728890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:08,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785728895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-28T09:21:08,969 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=50 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/7563fe8c408f442a94d8bd33622ec3ba
2024-11-28T09:21:08,979 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:21:08,979 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-11-28T09:21:08,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:08,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/fe05bbe89ff64b239bff72b0430fd8bd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd
2024-11-28T09:21:08,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing
2024-11-28T09:21:08,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:08,985 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15
java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:08,985 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15
java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:08,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=15
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
	at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
	at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
	at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}
	at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
	at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:09,004 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd, entries=200, sequenceid=50, filesize=14.0 K
2024-11-28T09:21:09,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ee62ac3905b74c81829271c17a5753c7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7
2024-11-28T09:21:09,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7, entries=150, sequenceid=50, filesize=11.7 K
2024-11-28T09:21:09,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/7563fe8c408f442a94d8bd33622ec3ba as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba
2024-11-28T09:21:09,035 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba, entries=150, sequenceid=50, filesize=11.7 K
2024-11-28T09:21:09,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9324112e51bee406916a385aca28ddff in 643ms, sequenceid=50, compaction requested=true
2024-11-28T09:21:09,037 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff:
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3
2024-11-28T09:21:09,039 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T09:21:09,039 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T09:21:09,039 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T09:21:09,043 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T09:21:09,043 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43023 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T09:21:09,045 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files)
2024-11-28T09:21:09,045 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files)
2024-11-28T09:21:09,045 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:09,045 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:09,046 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.2 K
2024-11-28T09:21:09,046 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=42.0 K
2024-11-28T09:21:09,048 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a967d15758314ec1bfef65c494186807, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785666732
2024-11-28T09:21:09,048 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e1ea91b04fe4819a0bfc90a69f46818, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785666732
2024-11-28T09:21:09,049 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a9882ac20a764f4ba222c6a9499bf8c7, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785666969
2024-11-28T09:21:09,049 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 92499738def54736ab192558e2fcfefe, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785666969
2024-11-28T09:21:09,050 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ee62ac3905b74c81829271c17a5753c7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700
2024-11-28T09:21:09,050 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fe05bbe89ff64b239bff72b0430fd8bd, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700
2024-11-28T09:21:09,087 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#9 average throughput is 0.50 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second
2024-11-28T09:21:09,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/f55d313d1d0245429f8f4c92bb2c3f32 is 50, key is test_row_0/B:col10/1732785668390/Put/seqid=0
2024-11-28T09:21:09,093 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#10 average throughput is 0.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T09:21:09,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/cab232f0eb2045858f38cdbc52497af7 is 50, key is test_row_0/A:col10/1732785668390/Put/seqid=0
2024-11-28T09:21:09,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741849_1025 (size=12104)
2024-11-28T09:21:09,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-28T09:21:09,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741848_1024 (size=12104)
2024-11-28T09:21:09,138 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15
2024-11-28T09:21:09,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:09,140 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C
2024-11-28T09:21:09,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:09,142 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/cab232f0eb2045858f38cdbc52497af7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/cab232f0eb2045858f38cdbc52497af7
2024-11-28T09:21:09,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/9f266bdf4df248049c2b1263fbf5ec5f is 50, key is test_row_0/A:col10/1732785668551/Put/seqid=0
2024-11-28T09:21:09,168 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into cab232f0eb2045858f38cdbc52497af7(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T09:21:09,169 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff:
2024-11-28T09:21:09,169 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785669038; duration=0sec
2024-11-28T09:21:09,170 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-28T09:21:09,170 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A
2024-11-28T09:21:09,170 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-28T09:21:09,184 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-28T09:21:09,184 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files)
2024-11-28T09:21:09,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741850_1026 (size=12001)
2024-11-28T09:21:09,185 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
2024-11-28T09:21:09,185 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.2 K
2024-11-28T09:21:09,188 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 937cea3554b44cf1a054e114e354c292, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785666732
2024-11-28T09:21:09,189 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5fd300af94c34569b4cfe84a5f73bc47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785666969
2024-11-28T09:21:09,190 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7563fe8c408f442a94d8bd33622ec3ba, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700
2024-11-28T09:21:09,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff
2024-11-28T09:21:09,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing
2024-11-28T09:21:09,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785729227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,236 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#12 average throughput is 0.94 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T09:21:09,237 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1cc22f2a610141409c960909cfb55c7c is 50, key is test_row_0/C:col10/1732785668390/Put/seqid=0
2024-11-28T09:21:09,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785729230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,240 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties
2024-11-28T09:21:09,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741851_1027 (size=12104)
2024-11-28T09:21:09,299 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1cc22f2a610141409c960909cfb55c7c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1cc22f2a610141409c960909cfb55c7c
2024-11-28T09:21:09,312 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 1cc22f2a610141409c960909cfb55c7c(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T09:21:09,312 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff:
2024-11-28T09:21:09,312 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785669039; duration=0sec
2024-11-28T09:21:09,312 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T09:21:09,312 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C
2024-11-28T09:21:09,343 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785729335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,344 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785729341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14
2024-11-28T09:21:09,551 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785729547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,553 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
	at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
	at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:21:09,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785729547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
2024-11-28T09:21:09,555 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/f55d313d1d0245429f8f4c92bb2c3f32 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/f55d313d1d0245429f8f4c92bb2c3f32
2024-11-28T09:21:09,570 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into f55d313d1d0245429f8f4c92bb2c3f32(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T09:21:09,570 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:09,570 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785669039; duration=0sec 2024-11-28T09:21:09,570 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:09,571 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:09,587 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/9f266bdf4df248049c2b1263fbf5ec5f 2024-11-28T09:21:09,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2ff9fc18a1674860b0b09fba684f04db is 50, key is test_row_0/B:col10/1732785668551/Put/seqid=0 2024-11-28T09:21:09,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741852_1028 (size=12001) 2024-11-28T09:21:09,653 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2ff9fc18a1674860b0b09fba684f04db 2024-11-28T09:21:09,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/d64b8d699d1f4b3e8dc61289750f0a04 is 50, key is test_row_0/C:col10/1732785668551/Put/seqid=0 2024-11-28T09:21:09,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741853_1029 (size=12001) 2024-11-28T09:21:09,731 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/d64b8d699d1f4b3e8dc61289750f0a04 2024-11-28T09:21:09,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/9f266bdf4df248049c2b1263fbf5ec5f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f 2024-11-28T09:21:09,764 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:21:09,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2ff9fc18a1674860b0b09fba684f04db as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db 2024-11-28T09:21:09,781 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:21:09,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/d64b8d699d1f4b3e8dc61289750f0a04 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04 2024-11-28T09:21:09,799 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:21:09,802 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 9324112e51bee406916a385aca28ddff in 662ms, sequenceid=75, compaction requested=false 2024-11-28T09:21:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:09,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:09,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-28T09:21:09,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-28T09:21:09,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-28T09:21:09,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 980 msec 2024-11-28T09:21:09,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 993 msec 2024-11-28T09:21:09,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:09,858 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:09,859 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:09,891 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/966aacd9447d40bbbbd1c89b498116a5 is 50, key is test_row_0/A:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:09,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741854_1030 (size=12001) 2024-11-28T09:21:09,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/966aacd9447d40bbbbd1c89b498116a5 2024-11-28T09:21:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-28T09:21:09,931 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-28T09:21:09,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:09,935 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-28T09:21:09,937 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T09:21:09,938 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:09,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:09,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:09,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785729934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785729935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:09,951 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/6b6252abc20944539f22915924c21355 is 50, key is test_row_0/B:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:09,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785729944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:09,953 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785729945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:09,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785729945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:09,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741855_1031 (size=12001) 2024-11-28T09:21:10,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/6b6252abc20944539f22915924c21355 2024-11-28T09:21:10,034 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/eea68bd99e214065959bfec3755c299d is 50, key is test_row_0/C:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:10,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T09:21:10,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785730048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785730048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741856_1032 (size=12001) 2024-11-28T09:21:10,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785730055, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785730056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/eea68bd99e214065959bfec3755c299d 2024-11-28T09:21:10,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785730059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,074 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/966aacd9447d40bbbbd1c89b498116a5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5 2024-11-28T09:21:10,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5, entries=150, sequenceid=90, filesize=11.7 K 2024-11-28T09:21:10,086 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/6b6252abc20944539f22915924c21355 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355 2024-11-28T09:21:10,092 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,093 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-28T09:21:10,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:10,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:10,093 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:10,093 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:10,094 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:10,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355, entries=150, sequenceid=90, filesize=11.7 K 2024-11-28T09:21:10,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/eea68bd99e214065959bfec3755c299d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d 2024-11-28T09:21:10,107 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-28T09:21:10,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9324112e51bee406916a385aca28ddff in 251ms, sequenceid=90, compaction requested=true 2024-11-28T09:21:10,109 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:10,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:10,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:10,109 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 
blocking 2024-11-28T09:21:10,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:10,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:10,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:10,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:10,110 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:10,114 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:10,114 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:10,114 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:10,114 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/cab232f0eb2045858f38cdbc52497af7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.3 K 2024-11-28T09:21:10,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:10,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:10,116 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:10,116 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/f55d313d1d0245429f8f4c92bb2c3f32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.3 K 2024-11-28T09:21:10,117 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cab232f0eb2045858f38cdbc52497af7, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700 2024-11-28T09:21:10,120 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f55d313d1d0245429f8f4c92bb2c3f32, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700 2024-11-28T09:21:10,120 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9f266bdf4df248049c2b1263fbf5ec5f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785668548 2024-11-28T09:21:10,122 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff9fc18a1674860b0b09fba684f04db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785668548 2024-11-28T09:21:10,123 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 966aacd9447d40bbbbd1c89b498116a5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:10,124 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6b6252abc20944539f22915924c21355, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:10,148 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#18 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:10,149 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/773286ecd3404e6fb0dff827785c0ec4 is 50, key is test_row_0/B:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:10,154 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#19 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:10,155 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/28b9dd5e179844fc9fa9d9b8d66aa7f4 is 50, key is test_row_0/A:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:10,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741857_1033 (size=12207) 2024-11-28T09:21:10,196 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/773286ecd3404e6fb0dff827785c0ec4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/773286ecd3404e6fb0dff827785c0ec4 2024-11-28T09:21:10,210 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into 773286ecd3404e6fb0dff827785c0ec4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:10,210 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:10,210 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785670109; duration=0sec 2024-11-28T09:21:10,210 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:10,210 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:10,211 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:10,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741858_1034 (size=12207) 2024-11-28T09:21:10,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:10,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:10,215 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:10,215 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1cc22f2a610141409c960909cfb55c7c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.3 K 2024-11-28T09:21:10,217 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1cc22f2a610141409c960909cfb55c7c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=50, earliestPutTs=1732785667700 2024-11-28T09:21:10,218 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d64b8d699d1f4b3e8dc61289750f0a04, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785668548 2024-11-28T09:21:10,218 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting eea68bd99e214065959bfec3755c299d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:10,239 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#20 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:10,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T09:21:10,240 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e129cc0946c340d39b7fee4fc2cf5363 is 50, key is test_row_0/C:col10/1732785669228/Put/seqid=0 2024-11-28T09:21:10,247 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,248 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-28T09:21:10,248 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:10,248 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:10,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,260 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:10,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:10,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6df0d666ff304643b2f5deea360b3b10 is 50, key is test_row_0/A:col10/1732785669923/Put/seqid=0 2024-11-28T09:21:10,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741859_1035 (size=12207) 2024-11-28T09:21:10,281 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785730275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785730278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785730279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785730280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785730283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,301 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e129cc0946c340d39b7fee4fc2cf5363 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e129cc0946c340d39b7fee4fc2cf5363 2024-11-28T09:21:10,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741860_1036 (size=12001) 2024-11-28T09:21:10,321 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into e129cc0946c340d39b7fee4fc2cf5363(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
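The repeated RegionTooBusyException entries come from HRegion.checkResources rejecting puts while the region memstore sits above its blocking size. The arithmetic below is a sketch of where the 512.0 K figure plausibly comes from, assuming a test-style 128 KB flush size and the usual block multiplier of 4; neither value is read from this run's configuration.

// Sketch of the blocking-limit arithmetic behind "Over memstore limit=512.0 K".
// Both values are assumptions for illustration: a shrunken test-style
// hbase.hregion.memstore.flush.size and the common block multiplier of 4.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    long flushSize = 128L * 1024;  // assumed hbase.hregion.memstore.flush.size (128 KB)
    long blockMultiplier = 4;      // assumed hbase.hregion.memstore.block.multiplier
    // checkResources throws RegionTooBusyException once the region memstore
    // exceeds flushSize * blockMultiplier: 128 KB * 4 = 512 KB, as logged above.
    System.out.println("blocking limit = " + (flushSize * blockMultiplier) + " bytes");
  }
}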
2024-11-28T09:21:10,322 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:10,322 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785670110; duration=0sec 2024-11-28T09:21:10,322 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:10,322 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:10,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785730384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T09:21:10,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785730390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,391 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-28T09:21:10,393 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-28T09:21:10,393 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-28T09:21:10,395 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T09:21:10,395 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-28T09:21:10,395 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-28T09:21:10,395 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-28T09:21:10,398 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785730397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785730398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,402 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T09:21:10,402 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-28T09:21:10,403 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785730400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T09:21:10,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785730593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,596 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785730592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,604 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785730603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785730601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785730607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,628 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/28b9dd5e179844fc9fa9d9b8d66aa7f4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/28b9dd5e179844fc9fa9d9b8d66aa7f4 2024-11-28T09:21:10,644 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 28b9dd5e179844fc9fa9d9b8d66aa7f4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
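While the memstore is over its blocking limit, every Mutate call in this stretch is rejected with RegionTooBusyException and a fresh deadline. The HBase client normally absorbs this with its own retries (hbase.client.retries.number, hbase.client.pause), and a rejected put may surface wrapped in a retries-exhausted exception rather than as the raw type, so the back-off loop below is only an illustrative sketch; the table, family and row names are taken from this test, the rest is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative writer that backs off when a put is rejected because the
// region's memstore is over its blocking limit, as in the handlers above.
public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          return;                       // accepted once a flush drains the memstore
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;    // give up after a few rejections
          Thread.sleep(100L * attempt); // simple linear back-off
        }
      }
    }
  }
}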
2024-11-28T09:21:10,645 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:10,645 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785670109; duration=0sec 2024-11-28T09:21:10,645 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:10,645 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:10,720 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6df0d666ff304643b2f5deea360b3b10 2024-11-28T09:21:10,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/1599c39e42544fb884b33043f275019f is 50, key is test_row_0/B:col10/1732785669923/Put/seqid=0 2024-11-28T09:21:10,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741861_1037 (size=12001) 2024-11-28T09:21:10,790 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/1599c39e42544fb884b33043f275019f 2024-11-28T09:21:10,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/0c3f831c93e644bf86ced4b2f7eb7193 is 50, key is test_row_0/C:col10/1732785669923/Put/seqid=0 2024-11-28T09:21:10,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741862_1038 (size=12001) 2024-11-28T09:21:10,837 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/0c3f831c93e644bf86ced4b2f7eb7193 2024-11-28T09:21:10,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6df0d666ff304643b2f5deea360b3b10 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10 2024-11-28T09:21:10,859 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10, entries=150, sequenceid=116, filesize=11.7 K 2024-11-28T09:21:10,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/1599c39e42544fb884b33043f275019f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f 2024-11-28T09:21:10,871 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f, entries=150, sequenceid=116, filesize=11.7 K 2024-11-28T09:21:10,874 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/0c3f831c93e644bf86ced4b2f7eb7193 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193 2024-11-28T09:21:10,890 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193, entries=150, sequenceid=116, filesize=11.7 K 2024-11-28T09:21:10,893 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 9324112e51bee406916a385aca28ddff in 645ms, sequenceid=116, compaction requested=false 2024-11-28T09:21:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:10,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
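The flush driven by procedure pid=17 has now written one ~11.7 K HFile per column family at sequenceid=116 and is about to report back to the master. The later "Operation: FLUSH, Table Name: default:TestAcidGuarantees" entry shows the client side of the same round trip; a minimal sketch of that call is below, with the connection details assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Minimal sketch of the client call behind the FlushTableProcedure entries:
// an Admin-level flush of the whole test table.
public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Waits for the master-side flush procedure to finish; each column
      // family (A, B, C) is written out as a new HFile, as seen above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}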
2024-11-28T09:21:10,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-28T09:21:10,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-28T09:21:10,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-28T09:21:10,902 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 958 msec 2024-11-28T09:21:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:10,906 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:21:10,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:10,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:10,909 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:10,910 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:10,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 971 msec 2024-11-28T09:21:10,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2d0dc6b9254445c5b006f9826cfe0c66 is 50, key is test_row_0/A:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:10,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741863_1039 (size=12051) 2024-11-28T09:21:10,994 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785730955, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785730992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,997 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785730993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:10,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785730994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785730998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-28T09:21:11,044 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-28T09:21:11,046 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:11,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-28T09:21:11,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T09:21:11,051 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:11,053 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:11,053 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:11,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785731099, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785731100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785731101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785731101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785731103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T09:21:11,206 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T09:21:11,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,208 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:11,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785731306, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785731307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785731307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785731307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785731311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T09:21:11,362 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,362 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T09:21:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:11,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,388 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2d0dc6b9254445c5b006f9826cfe0c66 2024-11-28T09:21:11,406 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/85c1ffb7bb334915be6cc6ffbf2cc817 is 50, key is test_row_0/B:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:11,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741864_1040 (size=12051) 2024-11-28T09:21:11,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/85c1ffb7bb334915be6cc6ffbf2cc817 2024-11-28T09:21:11,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5e8f8abf08b49fc8218007576064972 is 50, key is test_row_0/C:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:11,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741865_1041 (size=12051) 2024-11-28T09:21:11,517 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,518 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T09:21:11,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:11,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5e8f8abf08b49fc8218007576064972 2024-11-28T09:21:11,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:11,519 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2d0dc6b9254445c5b006f9826cfe0c66 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66 2024-11-28T09:21:11,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T09:21:11,539 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/85c1ffb7bb334915be6cc6ffbf2cc817 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817 2024-11-28T09:21:11,547 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T09:21:11,550 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5e8f8abf08b49fc8218007576064972 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972 2024-11-28T09:21:11,558 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T09:21:11,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9324112e51bee406916a385aca28ddff in 654ms, sequenceid=132, compaction requested=true 2024-11-28T09:21:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:11,560 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:11,560 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:11,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:11,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:11,561 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:11,562 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:11,562 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:11,562 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:11,562 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:11,563 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:11,563 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/28b9dd5e179844fc9fa9d9b8d66aa7f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.4 K 2024-11-28T09:21:11,563 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,563 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/773286ecd3404e6fb0dff827785c0ec4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.4 K 2024-11-28T09:21:11,564 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 28b9dd5e179844fc9fa9d9b8d66aa7f4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:11,564 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 773286ecd3404e6fb0dff827785c0ec4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:11,565 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1599c39e42544fb884b33043f275019f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732785669923 2024-11-28T09:21:11,565 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6df0d666ff304643b2f5deea360b3b10, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732785669923 2024-11-28T09:21:11,566 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 85c1ffb7bb334915be6cc6ffbf2cc817, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785670275 2024-11-28T09:21:11,566 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 2d0dc6b9254445c5b006f9826cfe0c66, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785670275 2024-11-28T09:21:11,586 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#27 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:11,587 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e7ff64d653684d10a9c600f549fd9e52 is 50, key is test_row_0/A:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:11,594 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#28 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:11,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c90ead9366fe4b51a995061681991ac1 is 50, key is test_row_0/B:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:11,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:21:11,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:11,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:11,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:11,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:11,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741866_1042 (size=12359) 2024-11-28T09:21:11,643 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785731636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/f74483259a0041ac9413b50906cdb3c1 is 50, key is test_row_0/A:col10/1732785671613/Put/seqid=0 2024-11-28T09:21:11,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785731637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,645 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785731641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785731646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785731646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T09:21:11,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741867_1043 (size=12359) 2024-11-28T09:21:11,657 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e7ff64d653684d10a9c600f549fd9e52 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e7ff64d653684d10a9c600f549fd9e52 2024-11-28T09:21:11,669 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into e7ff64d653684d10a9c600f549fd9e52(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:11,669 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:11,670 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785671560; duration=0sec 2024-11-28T09:21:11,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:11,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:11,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:11,672 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:11,672 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:11,672 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,672 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e129cc0946c340d39b7fee4fc2cf5363, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=35.4 K 2024-11-28T09:21:11,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,673 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e129cc0946c340d39b7fee4fc2cf5363, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785669213 2024-11-28T09:21:11,674 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c3f831c93e644bf86ced4b2f7eb7193, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732785669923 2024-11-28T09:21:11,674 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c5e8f8abf08b49fc8218007576064972, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, 
earliestPutTs=1732785670275 2024-11-28T09:21:11,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T09:21:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:11,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
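The Mutate calls rejected with RegionTooBusyException throughout this log surface on the client side as an IOException subclass. A minimal sketch of a hypothetical retry helper around a single Put, under the assumption that the exception reaches the caller directly (the stock HBase client also retries internally and may wrap the failure in its own retry exceptions); putWithRetry, the row, and the column values are illustrative, not taken from the test:

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetrySketch {
    // Hypothetical helper: back off and retry when the region reports it is over
    // its memstore limit, giving the flush and compactions time to catch up.
    static void putWithRetry(Table table, Put put, int attempts) throws IOException, InterruptedException {
        for (int i = 0; i < attempts; i++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                Thread.sleep(100L << i);  // exponential backoff: 100 ms, 200 ms, ...
            }
        }
        throw new IOException("region still too busy after " + attempts + " attempts");
    }

    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithRetry(table, put, 5);
        }
    }
}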
2024-11-28T09:21:11,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:11,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741868_1044 (size=14541) 2024-11-28T09:21:11,680 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/f74483259a0041ac9413b50906cdb3c1 2024-11-28T09:21:11,696 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#30 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:11,697 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8c5c8b6ccaef48c4b48beed3863950b0 is 50, key is test_row_0/C:col10/1732785670275/Put/seqid=0 2024-11-28T09:21:11,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/3e4debb7e31048298801393fab1b8852 is 50, key is test_row_0/B:col10/1732785671613/Put/seqid=0 2024-11-28T09:21:11,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741869_1045 (size=12359) 2024-11-28T09:21:11,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741870_1046 (size=12151) 2024-11-28T09:21:11,732 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/3e4debb7e31048298801393fab1b8852 2024-11-28T09:21:11,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785731746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,750 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785731748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785731748, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,755 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785731752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785731755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,762 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/472fe96c02f24e7d89be5c2012fb4fa8 is 50, key is test_row_0/C:col10/1732785671613/Put/seqid=0 2024-11-28T09:21:11,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741871_1047 (size=12151) 2024-11-28T09:21:11,775 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/472fe96c02f24e7d89be5c2012fb4fa8 2024-11-28T09:21:11,791 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/f74483259a0041ac9413b50906cdb3c1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1 2024-11-28T09:21:11,802 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1, entries=200, sequenceid=155, filesize=14.2 K 2024-11-28T09:21:11,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/3e4debb7e31048298801393fab1b8852 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852 2024-11-28T09:21:11,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852, entries=150, sequenceid=155, filesize=11.9 K 2024-11-28T09:21:11,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/472fe96c02f24e7d89be5c2012fb4fa8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8 2024-11-28T09:21:11,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8, entries=150, sequenceid=155, filesize=11.9 K 2024-11-28T09:21:11,826 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 9324112e51bee406916a385aca28ddff in 211ms, sequenceid=155, compaction requested=false 2024-11-28T09:21:11,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:11,830 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,831 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-28T09:21:11,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
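The CompactingMemStore(205) "FLUSHING TO DISK" and CompactionPipeline(136) entries that follow indicate the table's families use in-memory compaction. A minimal sketch of how such a table could be declared with the HBase 2.x client API, assuming families A, B and C as seen in the store paths above; the real TestAcidGuarantees table descriptor is not shown in this log:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactingMemStoreTableSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
            for (String family : new String[] {"A", "B", "C"}) {
                // Enabling in-memory compaction selects CompactingMemStore for the family,
                // which is why the flush entries in this log swap a CompactionPipeline segment.
                table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                    .build());
            }
            admin.createTable(table.build());
        }
    }
}

With the default memstore there would be no pipeline to swap at flush time, so those CompactionPipeline lines would be absent.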
2024-11-28T09:21:11,831 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:21:11,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:11,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:11,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:11,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:11,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d5687b2e4d7843278bc0f46e8aed4fbb is 50, key is test_row_0/A:col10/1732785671637/Put/seqid=0 2024-11-28T09:21:11,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741872_1048 (size=12151) 2024-11-28T09:21:11,868 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d5687b2e4d7843278bc0f46e8aed4fbb 2024-11-28T09:21:11,887 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/348ee4e246a04d7698ac79dd6cedb662 is 50, key is test_row_0/B:col10/1732785671637/Put/seqid=0 2024-11-28T09:21:11,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741873_1049 (size=12151) 2024-11-28T09:21:11,922 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/348ee4e246a04d7698ac79dd6cedb662 2024-11-28T09:21:11,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/29762ee6ee7445c9b707d935759db3ec is 50, key is test_row_0/C:col10/1732785671637/Put/seqid=0 2024-11-28T09:21:11,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:11,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:11,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741874_1050 (size=12151) 2024-11-28T09:21:11,982 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/29762ee6ee7445c9b707d935759db3ec 2024-11-28T09:21:11,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d5687b2e4d7843278bc0f46e8aed4fbb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb 2024-11-28T09:21:11,998 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785731987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,998 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,999 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785731989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785731989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:11,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785731986, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785731998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,011 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb, entries=150, sequenceid=170, filesize=11.9 K 2024-11-28T09:21:12,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/348ee4e246a04d7698ac79dd6cedb662 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662 2024-11-28T09:21:12,027 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662, entries=150, sequenceid=170, filesize=11.9 K 2024-11-28T09:21:12,031 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/29762ee6ee7445c9b707d935759db3ec as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec 2024-11-28T09:21:12,042 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec, entries=150, sequenceid=170, filesize=11.9 K 2024-11-28T09:21:12,045 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9324112e51bee406916a385aca28ddff in 213ms, sequenceid=170, compaction requested=true 2024-11-28T09:21:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-28T09:21:12,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-28T09:21:12,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-28T09:21:12,050 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 994 msec 2024-11-28T09:21:12,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 1.0050 sec 2024-11-28T09:21:12,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c90ead9366fe4b51a995061681991ac1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c90ead9366fe4b51a995061681991ac1 2024-11-28T09:21:12,084 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into c90ead9366fe4b51a995061681991ac1(size=12.1 K), total size for store is 35.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
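The FlushTableProcedure pid=18 that finishes above (and the follow-up flush stored as pid=20 further down) corresponds to a client-side Admin flush request, which the master expands into one FlushRegionProcedure per region (pid=19 here). A minimal sketch of the equivalent client call; the connection details are assumed to come from the cluster configuration on the classpath:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TableFlushSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection();
             Admin admin = conn.getAdmin()) {
            // Shows up in the master log as "Client=... flush TestAcidGuarantees" and completes
            // when the FlushTableProcedure is reported done via HBaseAdmin$TableFuture.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}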
2024-11-28T09:21:12,084 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,084 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785671560; duration=0sec 2024-11-28T09:21:12,084 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:12,084 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:12,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:12,104 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:21:12,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:12,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:12,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,106 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:12,107 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,130 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c5c597cd416b481ebb083f053e6798de is 50, key is test_row_0/A:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,131 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,132 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8c5c8b6ccaef48c4b48beed3863950b0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c5c8b6ccaef48c4b48beed3863950b0 2024-11-28T09:21:12,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,144 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 8c5c8b6ccaef48c4b48beed3863950b0(size=12.1 K), total size for store is 35.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:12,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,144 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785671561; duration=0sec 2024-11-28T09:21:12,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:12,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:12,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-28T09:21:12,156 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-28T09:21:12,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:12,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741875_1051 (size=12151) 2024-11-28T09:21:12,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-28T09:21:12,165 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c5c597cd416b481ebb083f053e6798de 2024-11-28T09:21:12,166 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:12,167 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:12,168 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:12,168 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:12,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/af4d4e9df8a9400989f34bd39ac7275f is 50, key is test_row_0/B:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741876_1052 (size=12151) 2024-11-28T09:21:12,224 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/af4d4e9df8a9400989f34bd39ac7275f 2024-11-28T09:21:12,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,233 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,234 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,237 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,238 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/a092317c2fee477da3c8a31d47d2fc2c is 50, key is test_row_0/C:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:12,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741877_1053 (size=12151) 2024-11-28T09:21:12,308 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/a092317c2fee477da3c8a31d47d2fc2c 2024-11-28T09:21:12,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c5c597cd416b481ebb083f053e6798de as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de 2024-11-28T09:21:12,320 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,321 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure 
class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T09:21:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:12,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,322 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,322 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:12,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
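The pid=21 failure recorded above is the expected outcome when a master-driven FlushRegionProcedure lands on a region server whose MemStoreFlusher is already flushing that region: FlushRegionCallable bails out with "Unable to complete flush", the region server reports the failure, and the master logs the RemoteProcedureException before re-dispatching the procedure (it appears again later in this log). For orientation, a minimal sketch of how a client drives such a flush through the public Admin API, assuming a reachable cluster and the table name seen here; connection setup and error handling are simplified and this is not the test's own code.

    // Minimal sketch (assumptions: default client config reaches the cluster; table exists).
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // On this build the master turns the request into a FlushTableProcedure
          // (pid=20 above), which fans out FlushRegionProcedure work such as pid=21.
          admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
        }
      }
    }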
2024-11-28T09:21:12,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T09:21:12,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/af4d4e9df8a9400989f34bd39ac7275f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f 2024-11-28T09:21:12,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T09:21:12,364 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/a092317c2fee477da3c8a31d47d2fc2c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c 2024-11-28T09:21:12,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c, entries=150, sequenceid=195, filesize=11.9 K 2024-11-28T09:21:12,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for 9324112e51bee406916a385aca28ddff in 271ms, sequenceid=195, compaction requested=true 2024-11-28T09:21:12,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:12,376 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:12,376 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:12,376 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:12,378 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:12,378 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:12,378 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,379 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c90ead9366fe4b51a995061681991ac1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=47.7 K 2024-11-28T09:21:12,379 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51202 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:12,379 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:12,379 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
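The SortedCompactionPolicy / ExploringCompactionPolicy lines above show the selection step: four eligible store files per family, none currently compacting, sixteen files before writes would be blocked, and all four chosen for a minor compaction. A sketch of the configuration keys that govern this selection follows; the key names are standard HBase settings, but the values are assumptions for illustration, not the settings this test actually uses.

    // Illustrative values only; the test's real configuration is not shown in this log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minimum files before a minor compaction is considered
        conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files compacted in one pass
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection log
        System.out.println(conf.getInt("hbase.hstore.blockingStoreFiles", -1));
      }
    }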
2024-11-28T09:21:12,379 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c90ead9366fe4b51a995061681991ac1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785670275 2024-11-28T09:21:12,379 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e7ff64d653684d10a9c600f549fd9e52, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=50.0 K 2024-11-28T09:21:12,380 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e4debb7e31048298801393fab1b8852, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732785670955 2024-11-28T09:21:12,380 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e7ff64d653684d10a9c600f549fd9e52, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785670275 2024-11-28T09:21:12,381 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f74483259a0041ac9413b50906cdb3c1, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732785670954 2024-11-28T09:21:12,382 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 348ee4e246a04d7698ac79dd6cedb662, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732785671637 2024-11-28T09:21:12,383 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d5687b2e4d7843278bc0f46e8aed4fbb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732785671637 2024-11-28T09:21:12,383 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting af4d4e9df8a9400989f34bd39ac7275f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:12,383 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c5c597cd416b481ebb083f053e6798de, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:12,418 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#39 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:12,418 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#40 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:12,419 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/df1870d9e7d340f0b599792d86e48cfd is 50, key is test_row_0/B:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,419 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/bbb746abcd984554a68e47fca6555c48 is 50, key is test_row_0/A:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741879_1055 (size=12595) 2024-11-28T09:21:12,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:12,440 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:12,442 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741878_1054 (size=12595) 2024-11-28T09:21:12,449 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/bbb746abcd984554a68e47fca6555c48 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bbb746abcd984554a68e47fca6555c48 2024-11-28T09:21:12,452 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2165e11f7eba40e3b1b206abf2abab25 is 50, key is 
test_row_0/A:col10/1732785672121/Put/seqid=0 2024-11-28T09:21:12,459 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/df1870d9e7d340f0b599792d86e48cfd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/df1870d9e7d340f0b599792d86e48cfd 2024-11-28T09:21:12,461 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into bbb746abcd984554a68e47fca6555c48(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:12,461 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,461 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=12, startTime=1732785672376; duration=0sec 2024-11-28T09:21:12,462 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:12,462 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:12,462 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:12,465 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:12,466 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:12,466 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
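Each Compactor(224) line above records the metadata of one input file: key count, bloom filter type, encoding, compression, sequence id, and earliest put timestamp. The bloomtype=ROW / encoding=NONE / compression=NONE attributes reflect how the A, B and C families of TestAcidGuarantees are declared. A minimal sketch of declaring a comparable family with the public descriptor builders follows; the exact schema used by the test is an assumption and may set further attributes.

    // Sketch only; the real TestAcidGuarantees table may differ beyond the bloom filter setting.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TableSchemaSketch {
      public static TableDescriptor build() {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
        for (String family : new String[] {"A", "B", "C"}) {
          table.setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes(family))
              .setBloomFilterType(BloomType.ROW)  // matches bloomtype=ROW in the compaction input lines
              .build());
        }
        return table.build();
      }
    }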
2024-11-28T09:21:12,466 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c5c8b6ccaef48c4b48beed3863950b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=47.7 K 2024-11-28T09:21:12,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:12,472 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c5c8b6ccaef48c4b48beed3863950b0, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785670275 2024-11-28T09:21:12,473 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 472fe96c02f24e7d89be5c2012fb4fa8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732785670955 2024-11-28T09:21:12,474 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 29762ee6ee7445c9b707d935759db3ec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1732785671637 2024-11-28T09:21:12,475 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into df1870d9e7d340f0b599792d86e48cfd(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:12,475 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,475 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=12, startTime=1732785672376; duration=0sec 2024-11-28T09:21:12,475 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a092317c2fee477da3c8a31d47d2fc2c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:12,475 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:12,475 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:12,476 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,476 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T09:21:12,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:12,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,477 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
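The "Over memstore limit=512.0 K" rejections that recur throughout this stretch come from HRegion.checkResources: once a region's memstore grows past the configured flush size multiplied by the block multiplier, further mutations are refused with RegionTooBusyException until the in-flight flush drains it. The sketch below shows the two settings that define that threshold; the concrete values are assumptions chosen only to reproduce the 512 K figure logged here, not the test's verified configuration.

    // Assumed: 128 K flush size * block multiplier 4 = 512 K, the limit reported above.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // per-region flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking threshold = flush size * multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("blocking limit (bytes): " + blockingLimit); // 524288 bytes = 512.0 K
      }
    }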
2024-11-28T09:21:12,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741880_1056 (size=14541) 2024-11-28T09:21:12,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2165e11f7eba40e3b1b206abf2abab25 2024-11-28T09:21:12,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,491 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,493 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,506 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:12,507 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/2b3cb36e1eca460da89bd978b565c6ba is 50, key is test_row_0/C:col10/1732785671986/Put/seqid=0 2024-11-28T09:21:12,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/7aa09265d9a74f87a0ffa43a0a09a2d0 is 50, key is test_row_0/B:col10/1732785672121/Put/seqid=0 2024-11-28T09:21:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741881_1057 (size=12595) 2024-11-28T09:21:12,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741882_1058 (size=12151) 2024-11-28T09:21:12,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/7aa09265d9a74f87a0ffa43a0a09a2d0 2024-11-28T09:21:12,565 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/2b3cb36e1eca460da89bd978b565c6ba as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2b3cb36e1eca460da89bd978b565c6ba 2024-11-28T09:21:12,577 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 2b3cb36e1eca460da89bd978b565c6ba(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
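The Mutate calls rejected with RegionTooBusyException before and after this point are normally absorbed by the HBase client's retry machinery, which treats the exception as retryable and backs off until the flush completes. As a rough illustration of explicit handling against the synchronous Table API (a sketch under assumptions, not the retry logic the test or the client library actually uses):

    // Sketch: retry a put with exponential backoff while the region reports it is too busy.
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
      static void putWithBackoff(Table table, Put put) throws Exception {
        long backoffMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            table.put(put);
            return;
          } catch (RegionTooBusyException e) {
            // Memstore above its blocking limit; wait for the flush to drain, then retry.
            Thread.sleep(backoffMs);
            backoffMs *= 2;
          }
        }
        throw new RuntimeException("region still too busy after retries");
      }

      static Put examplePut() {
        // Row/column names mirror those visible in this log (test_row_0, family A, col10).
        return new Put(Bytes.toBytes("test_row_0"))
            .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      }
    }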
2024-11-28T09:21:12,579 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,579 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=12, startTime=1732785672376; duration=0sec 2024-11-28T09:21:12,579 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:12,579 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:12,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1b10d23923e44072a695f9ea4cba8bb0 is 50, key is test_row_0/C:col10/1732785672121/Put/seqid=0 2024-11-28T09:21:12,587 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,587 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,595 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741883_1059 (size=12151) 2024-11-28T09:21:12,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=211 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1b10d23923e44072a695f9ea4cba8bb0 2024-11-28T09:21:12,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2165e11f7eba40e3b1b206abf2abab25 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25 2024-11-28T09:21:12,630 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T09:21:12,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:12,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
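
The pid=21 activity here is a master-dispatched flush: the region server executes FlushRegionCallable, declines because the region is already flushing ("NOT flushing ... as already flushing" just above), reports "Unable to complete flush" back to the master, and the master re-dispatches the same pid shortly afterwards, as the later pid=21 entries show. Purely as a hypothetical sketch, a table flush can be requested from client code through the Admin API as below; whether an explicit admin-level flush is what drove pid=21 in this run, and whether it is routed through a master procedure on this HBase version, is not something this excerpt settles.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Request a flush of the table's regions. If a region is already flushing,
          // the server-side callable fails with "Unable to complete flush" and the
          // flush is retried, which is what the pid=21 entries in this log record.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
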
2024-11-28T09:21:12,632 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25, entries=200, sequenceid=211, filesize=14.2 K 2024-11-28T09:21:12,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:12,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/7aa09265d9a74f87a0ffa43a0a09a2d0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0 2024-11-28T09:21:12,644 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0, entries=150, sequenceid=211, filesize=11.9 K 2024-11-28T09:21:12,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1b10d23923e44072a695f9ea4cba8bb0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0 2024-11-28T09:21:12,653 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0, entries=150, sequenceid=211, filesize=11.9 K 2024-11-28T09:21:12,655 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9324112e51bee406916a385aca28ddff in 
216ms, sequenceid=211, compaction requested=false 2024-11-28T09:21:12,655 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:12,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:12,784 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-28T09:21:12,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:12,785 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:12,794 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ad19c5c674934ab3af12c773abf1670f is 50, key is test_row_0/A:col10/1732785672474/Put/seqid=0 2024-11-28T09:21:12,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:12,797 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:12,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,815 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,816 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,818 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741884_1060 (size=12151) 2024-11-28T09:21:12,829 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ad19c5c674934ab3af12c773abf1670f 2024-11-28T09:21:12,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8172e3078f934ec19e1b860390020f01 is 50, key is test_row_0/B:col10/1732785672474/Put/seqid=0 2024-11-28T09:21:12,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741885_1061 (size=12151) 2024-11-28T09:21:12,916 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785732915, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785732919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,920 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785732919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785732919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:12,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:12,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785732921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785733119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785733123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785733123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785733124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785733125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:13,280 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8172e3078f934ec19e1b860390020f01 2024-11-28T09:21:13,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/594decbcdfc149caab544833146f1749 is 50, key is test_row_0/C:col10/1732785672474/Put/seqid=0 2024-11-28T09:21:13,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741886_1062 (size=12151) 2024-11-28T09:21:13,426 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785733424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785733427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785733428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785733429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785733430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,742 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/594decbcdfc149caab544833146f1749 2024-11-28T09:21:13,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ad19c5c674934ab3af12c773abf1670f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f 2024-11-28T09:21:13,762 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f, entries=150, sequenceid=235, filesize=11.9 K 2024-11-28T09:21:13,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8172e3078f934ec19e1b860390020f01 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01 2024-11-28T09:21:13,772 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01, entries=150, sequenceid=235, filesize=11.9 K 2024-11-28T09:21:13,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/594decbcdfc149caab544833146f1749 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749 2024-11-28T09:21:13,788 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749, entries=150, sequenceid=235, filesize=11.9 K 2024-11-28T09:21:13,790 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 9324112e51bee406916a385aca28ddff in 1005ms, sequenceid=235, compaction requested=true 2024-11-28T09:21:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:13,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-28T09:21:13,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-28T09:21:13,797 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-28T09:21:13,798 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6240 sec 2024-11-28T09:21:13,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.6400 sec 2024-11-28T09:21:13,932 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:21:13,932 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:13,933 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
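For reference, the repeated "Over memstore limit=512.0 K" rejections above come from HRegion.checkResources, which blocks updates once a region's memstore exceeds the blocking threshold derived from the configured flush size and block multiplier. The sketch below is illustrative only: the values this test run actually used are not visible in the log, and the 128 KB / 4x pair is simply one combination that would yield the observed 512 K limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Hypothetical values: a 128 KB flush size with a 4x block multiplier
    // gives a 512 K blocking limit, matching the RegionTooBusyException
    // messages in this log. The real test settings are not shown here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Blocking memstore limit: " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}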
2024-11-28T09:21:13,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:13,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/4b49c21658d8467f974b4b395d68604c is 50, key is test_row_0/A:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:13,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785733965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,975 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785733966, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,975 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785733965, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785733971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,977 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:13,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785733974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:13,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741887_1063 (size=14541) 2024-11-28T09:21:14,080 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785734078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785734078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785734078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785734078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,082 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785734079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-28T09:21:14,272 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-28T09:21:14,274 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:14,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-28T09:21:14,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T09:21:14,277 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:14,278 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:14,278 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:14,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785734282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785734283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,285 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785734283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,285 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785734284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785734284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T09:21:14,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/4b49c21658d8467f974b4b395d68604c 2024-11-28T09:21:14,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/03fd2e0092c042469fb4c71bdf97dd36 is 50, key is test_row_0/B:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:14,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741888_1064 (size=12151) 2024-11-28T09:21:14,423 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/03fd2e0092c042469fb4c71bdf97dd36 2024-11-28T09:21:14,431 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,432 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=23 2024-11-28T09:21:14,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:14,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
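The FlushTableProcedure / FlushRegionProcedure entries above (pid=20, 22, 23, requested by "Client=jenkins//172.17.0.2 flush TestAcidGuarantees") correspond to client-initiated flushes of the table. A minimal sketch of issuing such a flush through the Admin API, assuming a reachable cluster configuration on the classpath, looks roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush the table; in this log that request shows up
      // as a FlushTableProcedure with a FlushRegionProcedure subprocedure per
      // region. When a region is already flushing, the subprocedure fails with
      // "Unable to complete flush" and is retried, as seen above for pid=23.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}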
2024-11-28T09:21:14,444 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5ece68736174432482fe3a690869c169 is 50, key is test_row_0/C:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:14,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741889_1065 (size=12151) 2024-11-28T09:21:14,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T09:21:14,586 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-28T09:21:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:14,588 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
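The "callId: ... methodName: Mutate ... exception=RegionTooBusyException" records are the server rejecting client puts while the memstore is over its blocking limit; the standard HBase client retries these internally up to its configured retry/timeout limits, so the test code itself needs no special handling. Purely as an illustration (not how this test is written), a caller that wants to back off on the condition explicitly might look like the following hypothetical sketch; depending on client retry settings, the failure may instead surface wrapped in a RetriesExhaustedException rather than as RegionTooBusyException directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

      // Illustrative outer retry loop; attempt count and sleep are arbitrary.
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // Region memstore is over its blocking limit; wait for the flush
          // to drain it and try again.
          Thread.sleep(200L * attempt);
        }
      }
    }
  }
}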
2024-11-28T09:21:14,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785734587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785734587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785734588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785734589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:14,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785734589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-28T09:21:14,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:14,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,743 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:14,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
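The repeated RegionTooBusyException entries above come from HRegion.checkResources(): puts against the region are rejected while its memstore sits over the blocking limit, which HBase derives as hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier (the 512.0 K figure here reflects this test's scaled-down settings rather than the defaults). A minimal stand-alone sketch of reading those settings and computing the limit, assuming default values and not this test's configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative only: computes the per-region blocking memstore limit the same way the
// region does, from the two standard configuration keys.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush threshold per region (default 128 MB) and blocking multiplier (default 4).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = flushSize * multiplier;
    System.out.println("Puts fail with RegionTooBusyException once the region's memstore"
        + " exceeds " + blockingLimit + " bytes, until a flush brings it back down");
  }
}

Once the pending flush completes (as it does further down in the log), the memstore shrinks below this limit and the blocked Mutate calls stop failing.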
2024-11-28T09:21:14,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5ece68736174432482fe3a690869c169 2024-11-28T09:21:14,861 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/4b49c21658d8467f974b4b395d68604c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c 2024-11-28T09:21:14,870 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c, entries=200, sequenceid=251, filesize=14.2 K 2024-11-28T09:21:14,871 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/03fd2e0092c042469fb4c71bdf97dd36 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36 2024-11-28T09:21:14,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T09:21:14,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36, entries=150, sequenceid=251, filesize=11.9 K 2024-11-28T09:21:14,884 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5ece68736174432482fe3a690869c169 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169 2024-11-28T09:21:14,890 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169, entries=150, sequenceid=251, filesize=11.9 K 2024-11-28T09:21:14,892 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 9324112e51bee406916a385aca28ddff in 960ms, sequenceid=251, compaction requested=true 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:14,892 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:14,892 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:14,892 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:14,894 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:14,894 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53828 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:14,894 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:14,894 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:14,894 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,894 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
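The "Exploring compaction algorithm has selected 4 files ... with 3 in ratio" lines above reflect ExploringCompactionPolicy keeping only selections whose files satisfy the compaction ratio: no single file may be larger than the ratio times the combined size of the other files in the selection. The sketch below is a simplified, stand-alone illustration of that ratio test, assuming the default hbase.hstore.compaction.ratio of 1.2; it is not the production selection code, which also enforces min/max file counts and size bounds.

import java.util.List;

// Simplified ratio check over a candidate selection of store-file sizes (bytes).
public class RatioCheckSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // A single file must not dwarf the rest of the selection.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes, roughly the 12.3 K / 14.2 K / 11.9 K / 14.2 K files chosen for store A.
    List<Long> sizes = List.of(12_300L, 14_200L, 11_900L, 14_200L);
    System.out.println(filesInRatio(sizes, 1.2)); // prints true: all four files stay in ratio
  }
}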
2024-11-28T09:21:14,895 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bbb746abcd984554a68e47fca6555c48, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=52.6 K 2024-11-28T09:21:14,895 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/df1870d9e7d340f0b599792d86e48cfd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=47.9 K 2024-11-28T09:21:14,895 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bbb746abcd984554a68e47fca6555c48, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:14,895 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting df1870d9e7d340f0b599792d86e48cfd, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:14,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aa09265d9a74f87a0ffa43a0a09a2d0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732785672117 2024-11-28T09:21:14,896 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2165e11f7eba40e3b1b206abf2abab25, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732785672117 2024-11-28T09:21:14,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:14,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8172e3078f934ec19e1b860390020f01, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, 
earliestPutTs=1732785672474 2024-11-28T09:21:14,896 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ad19c5c674934ab3af12c773abf1670f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732785672474 2024-11-28T09:21:14,897 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 03fd2e0092c042469fb4c71bdf97dd36, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:14,897 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-28T09:21:14,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,897 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:14,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:14,899 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b49c21658d8467f974b4b395d68604c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:14,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ffa8316651904c74863181b981a12aa8 is 50, key is test_row_0/A:col10/1732785673972/Put/seqid=0 2024-11-28T09:21:14,915 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#52 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:14,916 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/82b20c17b539492582220cafb69aa353 is 50, key is test_row_0/A:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:14,917 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#53 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:14,918 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bbbeddef82eb47828c30fd3faa14c373 is 50, key is test_row_0/B:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:14,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741890_1066 (size=12301) 2024-11-28T09:21:14,921 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ffa8316651904c74863181b981a12aa8 2024-11-28T09:21:14,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741891_1067 (size=12731) 2024-11-28T09:21:14,947 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20b375872730489599645fe902432acd is 50, key is test_row_0/B:col10/1732785673972/Put/seqid=0 2024-11-28T09:21:14,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741892_1068 (size=12731) 2024-11-28T09:21:14,960 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/82b20c17b539492582220cafb69aa353 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/82b20c17b539492582220cafb69aa353 2024-11-28T09:21:14,960 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bbbeddef82eb47828c30fd3faa14c373 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bbbeddef82eb47828c30fd3faa14c373 2024-11-28T09:21:14,969 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741893_1069 (size=12301) 2024-11-28T09:21:14,970 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 82b20c17b539492582220cafb69aa353(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:14,970 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:14,970 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=12, startTime=1732785674892; duration=0sec 2024-11-28T09:21:14,970 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:14,970 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:14,970 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20b375872730489599645fe902432acd 2024-11-28T09:21:14,970 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:14,971 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into bbbeddef82eb47828c30fd3faa14c373(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
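The PressureAwareThroughputController messages above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show compaction I/O being rate-limited: the controller tracks bytes written and sleeps the compaction thread whenever the running rate would exceed the configured ceiling, which is why "slept 0 time(s)" is reported here. A rough sketch of that throttling idea, with a made-up limit and chunk size, and not the actual controller class:

// Rate-limits writes to a bytes-per-second ceiling by sleeping when ahead of schedule.
public class ThroughputThrottleSketch {
  private final double limitBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  ThroughputThrottleSketch(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  /** Call after writing {@code bytes}; sleeps if the running rate exceeds the limit. */
  void control(long bytes) throws InterruptedException {
    bytesWritten += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double earliestAllowedSec = bytesWritten / limitBytesPerSec;
    if (earliestAllowedSec > elapsedSec) {
      Thread.sleep((long) ((earliestAllowedSec - elapsedSec) * 1000));
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024);
    for (int i = 0; i < 10; i++) {
      throttle.control(1024 * 1024); // pretend we just wrote 1 MB of compacted data
    }
  }
}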
2024-11-28T09:21:14,971 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:14,971 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=12, startTime=1732785674892; duration=0sec 2024-11-28T09:21:14,971 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:14,971 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:14,975 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:14,975 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:14,975 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:14,975 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2b3cb36e1eca460da89bd978b565c6ba, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=47.9 K 2024-11-28T09:21:14,976 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b3cb36e1eca460da89bd978b565c6ba, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1732785671980 2024-11-28T09:21:14,976 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b10d23923e44072a695f9ea4cba8bb0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=211, earliestPutTs=1732785672117 2024-11-28T09:21:14,977 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 594decbcdfc149caab544833146f1749, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1732785672474 2024-11-28T09:21:14,977 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
5ece68736174432482fe3a690869c169, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:14,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5472e8b324340b396ba63225ee3ddcb is 50, key is test_row_0/C:col10/1732785673972/Put/seqid=0 2024-11-28T09:21:15,000 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#56 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:15,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741894_1070 (size=12301) 2024-11-28T09:21:15,003 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/80c9368eeef741b7a07e81d92b46ead2 is 50, key is test_row_0/C:col10/1732785672808/Put/seqid=0 2024-11-28T09:21:15,003 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=271 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5472e8b324340b396ba63225ee3ddcb 2024-11-28T09:21:15,012 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ffa8316651904c74863181b981a12aa8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8 2024-11-28T09:21:15,020 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8, entries=150, sequenceid=271, filesize=12.0 K 2024-11-28T09:21:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20b375872730489599645fe902432acd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd 2024-11-28T09:21:15,033 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd, entries=150, sequenceid=271, filesize=12.0 K 2024-11-28T09:21:15,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c5472e8b324340b396ba63225ee3ddcb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb 2024-11-28T09:21:15,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741895_1071 (size=12731) 2024-11-28T09:21:15,053 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb, entries=150, sequenceid=271, filesize=12.0 K 2024-11-28T09:21:15,057 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=0 B/0 for 9324112e51bee406916a385aca28ddff in 160ms, sequenceid=271, compaction requested=false 2024-11-28T09:21:15,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
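The flush and compaction outputs in this stretch of the log are first written under the region's .tmp directory and only then "committed" by HRegionFileSystem, i.e. moved into the store directories (A/, B/, C/), so readers never observe a half-written HFile. A minimal sketch of that write-to-temp-then-rename pattern on HDFS, with placeholder namenode address and paths:

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Mirrors the "Committing .../.tmp/... as ..." lines above: write to a temp path, then rename.
public class TmpThenCommit {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33549"), conf);
    Path tmp = new Path("/user/jenkins/example-region/.tmp/A/example-hfile");
    Path dst = new Path("/user/jenkins/example-region/A/example-hfile");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("flushed cells would go here"); // stand-in for real HFile contents
    }
    fs.mkdirs(dst.getParent());
    // HDFS rename is atomic within the namespace, so the store dir only ever holds complete files.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed for " + tmp);
    }
  }
}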
2024-11-28T09:21:15,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-28T09:21:15,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-28T09:21:15,059 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/80c9368eeef741b7a07e81d92b46ead2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/80c9368eeef741b7a07e81d92b46ead2 2024-11-28T09:21:15,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-28T09:21:15,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 782 msec 2024-11-28T09:21:15,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 789 msec 2024-11-28T09:21:15,067 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 80c9368eeef741b7a07e81d92b46ead2(size=12.4 K), total size for store is 24.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:15,067 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,067 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=12, startTime=1732785674892; duration=0sec 2024-11-28T09:21:15,068 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:15,068 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:15,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:15,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:15,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:15,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:15,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:15,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/48837577ee0740b3bb2aba337d3b20ce is 50, key is test_row_0/A:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741896_1072 (size=17181) 2024-11-28T09:21:15,156 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/48837577ee0740b3bb2aba337d3b20ce 2024-11-28T09:21:15,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/40e1776815e44a81b228b117e667e7c1 is 50, key is test_row_0/B:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785735170, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,189 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785735172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785735181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,199 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785735187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,201 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785735189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741897_1073 (size=12301) 2024-11-28T09:21:15,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/40e1776815e44a81b228b117e667e7c1 2024-11-28T09:21:15,244 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/92a7df0f49004adba40363d002a3572c is 50, key is test_row_0/C:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741898_1074 (size=12301) 2024-11-28T09:21:15,265 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=285 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/92a7df0f49004adba40363d002a3572c 2024-11-28T09:21:15,273 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/48837577ee0740b3bb2aba337d3b20ce as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce 2024-11-28T09:21:15,285 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce, entries=250, sequenceid=285, filesize=16.8 K 2024-11-28T09:21:15,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/40e1776815e44a81b228b117e667e7c1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1 2024-11-28T09:21:15,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1, entries=150, sequenceid=285, filesize=12.0 K 2024-11-28T09:21:15,297 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785735292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/92a7df0f49004adba40363d002a3572c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c 2024-11-28T09:21:15,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785735292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785735292, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,307 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785735301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,308 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785735303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c, entries=150, sequenceid=285, filesize=12.0 K 2024-11-28T09:21:15,313 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9324112e51bee406916a385aca28ddff in 203ms, sequenceid=285, compaction requested=true 2024-11-28T09:21:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:15,313 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:15,313 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:15,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:15,314 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:15,314 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:15,315 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:15,315 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42213 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:15,315 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:15,315 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:15,315 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,315 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,315 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bbbeddef82eb47828c30fd3faa14c373, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.5 K 2024-11-28T09:21:15,315 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/82b20c17b539492582220cafb69aa353, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=41.2 K 2024-11-28T09:21:15,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bbbeddef82eb47828c30fd3faa14c373, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:15,316 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 82b20c17b539492582220cafb69aa353, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:15,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): 
Compacting 20b375872730489599645fe902432acd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732785673962 2024-11-28T09:21:15,316 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ffa8316651904c74863181b981a12aa8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732785673962 2024-11-28T09:21:15,317 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 40e1776815e44a81b228b117e667e7c1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675101 2024-11-28T09:21:15,317 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48837577ee0740b3bb2aba337d3b20ce, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675099 2024-11-28T09:21:15,342 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#60 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:15,343 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/aabee2ff60874d59a521034961d9b711 is 50, key is test_row_0/B:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,356 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#61 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:15,357 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ad7dade332994af6b86152fdb98af785 is 50, key is test_row_0/A:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741899_1075 (size=12983) 2024-11-28T09:21:15,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741900_1076 (size=12983) 2024-11-28T09:21:15,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-28T09:21:15,381 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed 2024-11-28T09:21:15,384 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:15,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees 2024-11-28T09:21:15,386 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:15,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:15,387 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:15,387 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:15,398 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/aabee2ff60874d59a521034961d9b711 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/aabee2ff60874d59a521034961d9b711 2024-11-28T09:21:15,398 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ad7dade332994af6b86152fdb98af785 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad7dade332994af6b86152fdb98af785 2024-11-28T09:21:15,407 INFO [RS:0;363d8d38a970:33819-longCompactions-0 
{}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into aabee2ff60874d59a521034961d9b711(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:15,407 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,407 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785675313; duration=0sec 2024-11-28T09:21:15,407 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:15,407 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:15,407 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:15,409 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:15,409 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:15,409 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:15,409 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/80c9368eeef741b7a07e81d92b46ead2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.5 K 2024-11-28T09:21:15,410 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 80c9368eeef741b7a07e81d92b46ead2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785672808 2024-11-28T09:21:15,410 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c5472e8b324340b396ba63225ee3ddcb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=271, earliestPutTs=1732785673962 2024-11-28T09:21:15,411 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 92a7df0f49004adba40363d002a3572c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675101 2024-11-28T09:21:15,412 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into ad7dade332994af6b86152fdb98af785(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:15,412 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,412 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785675313; duration=0sec 2024-11-28T09:21:15,412 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:15,412 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:15,433 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#62 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:15,434 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/dfcd54593fa14e899c96acca27e6ebb0 is 50, key is test_row_0/C:col10/1732785675106/Put/seqid=0 2024-11-28T09:21:15,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741901_1077 (size=12983) 2024-11-28T09:21:15,474 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/dfcd54593fa14e899c96acca27e6ebb0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dfcd54593fa14e899c96acca27e6ebb0 2024-11-28T09:21:15,484 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into dfcd54593fa14e899c96acca27e6ebb0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:15,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:15,484 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785675313; duration=0sec 2024-11-28T09:21:15,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:15,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:15,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:15,504 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T09:21:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:15,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:15,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:15,505 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,505 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:15,506 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:15,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e5c8c911279b47ada50487b0f0e6196d is 50, key is test_row_0/A:col10/1732785675187/Put/seqid=0 2024-11-28T09:21:15,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785735514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785735515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785735516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785735516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785735517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,540 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,541 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:15,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:15,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,542 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:15,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741902_1078 (size=14741) 2024-11-28T09:21:15,558 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e5c8c911279b47ada50487b0f0e6196d 2024-11-28T09:21:15,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b5e6fb1317bd45ea98f97c5ad4903916 is 50, key is test_row_0/B:col10/1732785675187/Put/seqid=0 2024-11-28T09:21:15,620 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785735619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785735622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785735623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,627 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785735623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741903_1079 (size=12301) 2024-11-28T09:21:15,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785735624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:15,695 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:15,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:15,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:15,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785735825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785735829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785735831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785735836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:15,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785735837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,850 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:15,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:15,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:15,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:15,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:16,005 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,006 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:16,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:16,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,006 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:16,029 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b5e6fb1317bd45ea98f97c5ad4903916 2024-11-28T09:21:16,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ec43509c63444c739fa804ffd4eccbc2 is 50, key is test_row_0/C:col10/1732785675187/Put/seqid=0 2024-11-28T09:21:16,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741904_1080 (size=12301) 2024-11-28T09:21:16,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ec43509c63444c739fa804ffd4eccbc2 2024-11-28T09:21:16,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e5c8c911279b47ada50487b0f0e6196d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d 2024-11-28T09:21:16,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d, entries=200, sequenceid=316, filesize=14.4 K 2024-11-28T09:21:16,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b5e6fb1317bd45ea98f97c5ad4903916 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916 2024-11-28T09:21:16,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916, entries=150, sequenceid=316, filesize=12.0 K 2024-11-28T09:21:16,091 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ec43509c63444c739fa804ffd4eccbc2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2 2024-11-28T09:21:16,097 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2, entries=150, sequenceid=316, filesize=12.0 K 2024-11-28T09:21:16,098 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=46.96 KB/48090 for 9324112e51bee406916a385aca28ddff in 595ms, sequenceid=316, compaction requested=false 2024-11-28T09:21:16,099 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:16,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:16,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:16,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:16,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:16,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,146 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5b6c2145bf6a47ec81cb0efb1c001e29 is 50, key is test_row_0/A:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,159 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,159 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:16,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:16,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:16,160 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741905_1081 (size=14741) 2024-11-28T09:21:16,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5b6c2145bf6a47ec81cb0efb1c001e29 2024-11-28T09:21:16,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/73acd2c7c6da4ccc99151657ead94482 is 50, key is test_row_0/B:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741906_1082 (size=12301) 2024-11-28T09:21:16,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/73acd2c7c6da4ccc99151657ead94482 2024-11-28T09:21:16,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8b2935459e1b47d6b912e9736dd81aaf is 50, key is test_row_0/C:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,273 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741907_1083 (size=12301) 2024-11-28T09:21:16,277 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8b2935459e1b47d6b912e9736dd81aaf 2024-11-28T09:21:16,282 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,283 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,284 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736280, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,285 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,285 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5b6c2145bf6a47ec81cb0efb1c001e29 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29 2024-11-28T09:21:16,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,286 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,294 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29, entries=200, sequenceid=327, filesize=14.4 K 2024-11-28T09:21:16,296 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/73acd2c7c6da4ccc99151657ead94482 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482 2024-11-28T09:21:16,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482, entries=150, sequenceid=327, filesize=12.0 K 2024-11-28T09:21:16,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8b2935459e1b47d6b912e9736dd81aaf as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf 2024-11-28T09:21:16,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,314 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:16,314 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf, entries=150, sequenceid=327, filesize=12.0 K 2024-11-28T09:21:16,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
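The repeated Mutate rejections above are write backpressure: once the region's memstore passes its blocking limit (512.0 K in this run), HRegion.checkResources throws RegionTooBusyException until the in-flight flush frees space. Below is a minimal client-side sketch of what handling that failure could look like; the table, column family, and retry/backoff numbers are illustrative assumptions, and in practice the stock HBase client already retries this exception internally, usually surfacing it only after its own retries are exhausted, so explicit handling like this is rarely needed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetrySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);            // rejected while the memstore is over its blocking limit
                        break;
                    } catch (RegionTooBusyException e) {
                        if (attempt >= 5) throw e; // give up after a few tries (illustrative)
                        Thread.sleep(100L * attempt); // simple linear backoff while the flush drains
                    }
                }
            }
        }
    }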
2024-11-28T09:21:16,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:16,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,314 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:16,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
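The pid=25 failure above is a benign collision: the master-dispatched FlushRegionCallable arrived while MemStoreFlusher.0 was already flushing the region, so the callable reports "Unable to complete flush" and the master re-dispatches it later (it succeeds further down). For reference, a flush of this kind can be requested through the Admin API; whether that maps to the FlushTableProcedure/FlushRegionProcedure path seen in this snapshot build depends on the HBase version, so treat the mapping as an assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlushSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the master to flush every region of the table; on this build the
                // request fans out as the FlushRegionCallable seen in the log (pid=25).
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }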
2024-11-28T09:21:16,315 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 9324112e51bee406916a385aca28ddff in 185ms, sequenceid=327, compaction requested=true 2024-11-28T09:21:16,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
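For context on the 512.0 K figure in the rejections above: a region blocks new writes once its memstore exceeds the configured flush size multiplied by hbase.hregion.memstore.block.multiplier. The numbers in the sketch below are illustrative assumptions chosen to reproduce 512 K; the test's actual settings are not visible in this log.

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            // Standard relationship: blocking limit = memstore flush size * block multiplier.
            long flushSize = 128L * 1024;   // hypothetical hbase.hregion.memstore.flush.size (128 K)
            int multiplier = 4;             // default hbase.hregion.memstore.block.multiplier
            long blockingLimit = flushSize * multiplier;
            System.out.println("blocking limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
        }
    }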
2024-11-28T09:21:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:16,316 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:16,317 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:16,317 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42465 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:16,318 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:16,318 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,318 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad7dade332994af6b86152fdb98af785, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=41.5 K 2024-11-28T09:21:16,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:16,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:16,318 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:16,319 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:16,319 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ad7dade332994af6b86152fdb98af785, keycount=150, bloomtype=ROW, size=12.7 K, 
encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675101 2024-11-28T09:21:16,319 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:16,320 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:16,320 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,320 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/aabee2ff60874d59a521034961d9b711, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.7 K 2024-11-28T09:21:16,320 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting aabee2ff60874d59a521034961d9b711, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675101 2024-11-28T09:21:16,321 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e5c8c911279b47ada50487b0f0e6196d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732785675170 2024-11-28T09:21:16,321 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5e6fb1317bd45ea98f97c5ad4903916, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732785675170 2024-11-28T09:21:16,322 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5b6c2145bf6a47ec81cb0efb1c001e29, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:16,323 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 73acd2c7c6da4ccc99151657ead94482, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:16,338 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#69 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:16,339 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d328d7f0c2584593bc921fe9950dab16 is 50, key is test_row_0/A:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,346 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#70 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:16,346 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ecb98be753854a5085a65df5bd006166 is 50, key is test_row_0/B:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741908_1084 (size=13085) 2024-11-28T09:21:16,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741909_1085 (size=13085) 2024-11-28T09:21:16,362 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d328d7f0c2584593bc921fe9950dab16 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d328d7f0c2584593bc921fe9950dab16 2024-11-28T09:21:16,370 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into d328d7f0c2584593bc921fe9950dab16(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
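The compaction entries above show ExploringCompactionPolicy selecting all three eligible store files per store for a minor compaction ("3 eligible, 16 blocking"). The sketch below lists the standard file-count knobs behind that selection; the values are illustrative, not this test's configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionSelectionTuningSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible store files before a minor compaction is selected.
            conf.setInt("hbase.hstore.compaction.min", 3);
            // Maximum number of files merged by a single minor compaction.
            conf.setInt("hbase.hstore.compaction.max", 10);
            // Updates to the region are delayed once any store reaches this many files
            // (the "16 blocking" reported by SortedCompactionPolicy above).
            conf.setInt("hbase.hstore.blockingStoreFiles", 16);
            return conf;
        }
    }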
2024-11-28T09:21:16,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,370 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785676316; duration=0sec 2024-11-28T09:21:16,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:16,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:16,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:16,372 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:16,372 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:16,372 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:16,372 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dfcd54593fa14e899c96acca27e6ebb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.7 K 2024-11-28T09:21:16,374 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting dfcd54593fa14e899c96acca27e6ebb0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=285, earliestPutTs=1732785675101 2024-11-28T09:21:16,374 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ec43509c63444c739fa804ffd4eccbc2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1732785675170 2024-11-28T09:21:16,375 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8b2935459e1b47d6b912e9736dd81aaf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:16,385 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
9324112e51bee406916a385aca28ddff#C#compaction#71 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:16,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/2d5538daf2964794a6ca46f8c2fc9917 is 50, key is test_row_0/C:col10/1732785675512/Put/seqid=0 2024-11-28T09:21:16,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741910_1086 (size=13085) 2024-11-28T09:21:16,435 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/2d5538daf2964794a6ca46f8c2fc9917 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2d5538daf2964794a6ca46f8c2fc9917 2024-11-28T09:21:16,443 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 2d5538daf2964794a6ca46f8c2fc9917(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:16,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,443 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785676318; duration=0sec 2024-11-28T09:21:16,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:16,444 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:16,468 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
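The throughput lines above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") come from the pressure-aware compaction throttler. The property names below are the ones I would expect on a 2.x build, but they are stated from memory and, like the values, should be treated as assumptions to verify against your version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputSketch {
        public static Configuration tunedConf() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed property names: upper/lower bounds between which the
            // pressure-aware controller scales the per-server compaction throughput limit.
            conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
            conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
            return conf;
        }
    }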
2024-11-28T09:21:16,469 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:16,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/da83118749c94a439c19261f241693f5 is 50, key is test_row_0/A:col10/1732785676175/Put/seqid=0 2024-11-28T09:21:16,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:16,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:16,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:16,496 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,496 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,497 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741911_1087 (size=12301) 2024-11-28T09:21:16,516 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/da83118749c94a439c19261f241693f5 2024-11-28T09:21:16,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b42dd754c2294a6ca14f3daba3943932 is 50, key is test_row_0/B:col10/1732785676175/Put/seqid=0 2024-11-28T09:21:16,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741912_1088 (size=12301) 2024-11-28T09:21:16,587 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b42dd754c2294a6ca14f3daba3943932 2024-11-28T09:21:16,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,605 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,606 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/034816f3b2164364bf6ff0ef36bb7d51 is 50, key is test_row_0/C:col10/1732785676175/Put/seqid=0 2024-11-28T09:21:16,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741913_1089 (size=12301) 2024-11-28T09:21:16,641 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/034816f3b2164364bf6ff0ef36bb7d51 2024-11-28T09:21:16,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/da83118749c94a439c19261f241693f5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5 2024-11-28T09:21:16,660 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5, entries=150, sequenceid=355, filesize=12.0 K 2024-11-28T09:21:16,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/b42dd754c2294a6ca14f3daba3943932 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932 2024-11-28T09:21:16,671 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 
{event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932, entries=150, sequenceid=355, filesize=12.0 K 2024-11-28T09:21:16,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/034816f3b2164364bf6ff0ef36bb7d51 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51 2024-11-28T09:21:16,684 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51, entries=150, sequenceid=355, filesize=12.0 K 2024-11-28T09:21:16,686 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 9324112e51bee406916a385aca28ddff in 216ms, sequenceid=355, compaction requested=false 2024-11-28T09:21:16,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
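The flush above logged "CompactingMemStore ... FLUSHING TO DISK" and "CompactionPipeline ... Swapping pipeline suffix" for each store, which indicates the column families run with in-memory compaction enabled. A hedged sketch of declaring that at table-creation time follows; the table and family names mirror the test, but the BASIC policy choice is an illustrative assumption rather than the test's known configuration.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class InMemoryCompactionTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
                for (String family : new String[] {"A", "B", "C"}) {
                    table.setColumnFamily(
                        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                            // BASIC keeps a pipeline of immutable in-memory segments,
                            // matching the CompactionPipeline messages in the log.
                            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                            .build());
                }
                admin.createTable(table.build());
            }
        }
    }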
2024-11-28T09:21:16,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-28T09:21:16,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-28T09:21:16,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-28T09:21:16,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3010 sec 2024-11-28T09:21:16,693 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 1.3080 sec 2024-11-28T09:21:16,768 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ecb98be753854a5085a65df5bd006166 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ecb98be753854a5085a65df5bd006166 2024-11-28T09:21:16,779 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into ecb98be753854a5085a65df5bd006166(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:16,779 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:16,779 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785676316; duration=0sec 2024-11-28T09:21:16,779 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:16,779 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:16,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:16,806 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:16,806 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:16,833 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5e03a7525e834955ac51ac8fb62f4365 is 50, key is test_row_0/A:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:16,837 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,844 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741914_1090 (size=12301) 2024-11-28T09:21:16,876 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5e03a7525e834955ac51ac8fb62f4365 2024-11-28T09:21:16,888 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c274091a558a4b30a23858f0bab1f103 is 50, key is test_row_0/B:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:16,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741915_1091 (size=12301) 2024-11-28T09:21:16,919 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c274091a558a4b30a23858f0bab1f103 2024-11-28T09:21:16,930 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f is 50, key is test_row_0/C:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:16,941 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785736939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,942 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785736939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785736943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785736945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,948 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:16,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785736947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:16,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741916_1092 (size=12301) 2024-11-28T09:21:16,964 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=367 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f 2024-11-28T09:21:16,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/5e03a7525e834955ac51ac8fb62f4365 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365 2024-11-28T09:21:16,981 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365, entries=150, sequenceid=367, filesize=12.0 K 2024-11-28T09:21:16,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c274091a558a4b30a23858f0bab1f103 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103 2024-11-28T09:21:17,000 
INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103, entries=150, sequenceid=367, filesize=12.0 K 2024-11-28T09:21:17,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f 2024-11-28T09:21:17,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f, entries=150, sequenceid=367, filesize=12.0 K 2024-11-28T09:21:17,017 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 9324112e51bee406916a385aca28ddff in 211ms, sequenceid=367, compaction requested=true 2024-11-28T09:21:17,017 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:17,018 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:17,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:17,018 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:17,018 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:17,020 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:17,020 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:17,020 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
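[Editor's aside] The RegionTooBusyException warnings above come from the write path shown in the stack traces (RSRpcServices.mutate -> HRegion.put -> checkResources), which rejects mutations while the region's memstore is over its 512.0 K limit. For context, a hypothetical and heavily simplified writer loop is sketched below; it is not the test's own client code, and in practice the stock HBase client usually retries such rejections internally and may surface them wrapped in a retries-exhausted exception rather than directly as shown here. Row key, family and qualifier mirror the ones visible in the log; the value is purely illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriterSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // rejected server-side while the memstore is over its limit
          break;
        } catch (RegionTooBusyException e) {
          // Simplification: shown only to make the server-side rejection in the
          // log concrete; real clients normally retry internally before this
          // ever reaches application code.
          if (attempt == 5) throw e;
          Thread.sleep(100L * attempt); // linear backoff between attempts
        }
      }
    }
  }
}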
2024-11-28T09:21:17,020 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d328d7f0c2584593bc921fe9950dab16, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.8 K 2024-11-28T09:21:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:17,020 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:17,020 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:17,020 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:17,021 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:17,021 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d328d7f0c2584593bc921fe9950dab16, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:17,021 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ecb98be753854a5085a65df5bd006166, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.8 K 2024-11-28T09:21:17,021 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting da83118749c94a439c19261f241693f5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732785676171 2024-11-28T09:21:17,022 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ecb98be753854a5085a65df5bd006166, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:17,022 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5e03a7525e834955ac51ac8fb62f4365, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:17,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:17,022 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:17,022 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b42dd754c2294a6ca14f3daba3943932, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732785676171 2024-11-28T09:21:17,023 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c274091a558a4b30a23858f0bab1f103, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:17,038 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#78 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:17,039 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/93ff1aad62b2484ebd241aafed147a6d is 50, key is test_row_0/A:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:17,050 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#79 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:17,051 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a851c5d966874d6d85a332bf1252bafd is 50, key is test_row_0/B:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:17,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741917_1093 (size=13187) 2024-11-28T09:21:17,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741918_1094 (size=13187) 2024-11-28T09:21:17,083 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/93ff1aad62b2484ebd241aafed147a6d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/93ff1aad62b2484ebd241aafed147a6d 2024-11-28T09:21:17,091 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a851c5d966874d6d85a332bf1252bafd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a851c5d966874d6d85a332bf1252bafd 2024-11-28T09:21:17,094 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 93ff1aad62b2484ebd241aafed147a6d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
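[Editor's aside] For reference, the byte counts and human-readable sizes in the compaction entries above and below agree: the three selected files of 12.8 K + 12.0 K + 12.0 K correspond to the 37687-byte selection reported by the ExploringCompactionPolicy (37687 / 1024 ≈ 36.8 K), and each compacted output file stored as a 13187-byte block works out to the 12.9 K size logged for it (13187 / 1024 ≈ 12.9 K).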
2024-11-28T09:21:17,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:17,094 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785677017; duration=0sec 2024-11-28T09:21:17,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:17,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:17,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:17,095 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:17,095 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:17,096 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,096 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2d5538daf2964794a6ca46f8c2fc9917, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.8 K 2024-11-28T09:21:17,096 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d5538daf2964794a6ca46f8c2fc9917, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1732785675512 2024-11-28T09:21:17,097 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 034816f3b2164364bf6ff0ef36bb7d51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1732785676171 2024-11-28T09:21:17,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5c716f6ec9f4dc18f6c4a1b52bcd33f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:17,101 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed 
compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into a851c5d966874d6d85a332bf1252bafd(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:17,101 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:17,101 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785677018; duration=0sec 2024-11-28T09:21:17,101 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:17,101 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:17,111 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#80 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:17,112 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/6d11d81a377f4183a1e5ea5cb457ea2b is 50, key is test_row_0/C:col10/1732785676489/Put/seqid=0 2024-11-28T09:21:17,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741919_1095 (size=13187) 2024-11-28T09:21:17,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:17,147 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:21:17,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:17,148 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:17,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:17,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,159 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/6d11d81a377f4183a1e5ea5cb457ea2b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6d11d81a377f4183a1e5ea5cb457ea2b 2024-11-28T09:21:17,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785737157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785737158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785737159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,166 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785737160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785737161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,170 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/47c3fe778b884a88a31113570baa635e is 50, key is test_row_0/A:col10/1732785676835/Put/seqid=0 2024-11-28T09:21:17,171 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 6d11d81a377f4183a1e5ea5cb457ea2b(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:17,171 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:17,171 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785677022; duration=0sec 2024-11-28T09:21:17,171 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:17,171 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:17,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741920_1096 (size=17181) 2024-11-28T09:21:17,198 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/47c3fe778b884a88a31113570baa635e 2024-11-28T09:21:17,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bf530c833d254ff0a594176b5ad86a62 is 50, key is test_row_0/B:col10/1732785676835/Put/seqid=0 2024-11-28T09:21:17,245 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741921_1097 (size=12301) 2024-11-28T09:21:17,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bf530c833d254ff0a594176b5ad86a62 2024-11-28T09:21:17,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785737263, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/edb235d5ad3c4e91a4df77416adb6ed4 is 50, key is test_row_0/C:col10/1732785676835/Put/seqid=0 2024-11-28T09:21:17,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785737265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785737268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,271 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785737269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741922_1098 (size=12301) 2024-11-28T09:21:17,470 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785737469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785737469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,474 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785737471, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785737473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785737473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-28T09:21:17,492 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-28T09:21:17,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:17,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-28T09:21:17,498 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:17,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T09:21:17,499 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:17,499 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:17,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=26 2024-11-28T09:21:17,654 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,654 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:17,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:17,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,655 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
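The RegionTooBusyException entries above are server-side backpressure: HRegion.checkResources rejects the Mutate RPC while the region's memstore is over its blocking limit, and the stock client normally retries such calls with backoff. Below is a minimal client-side sketch of the write path those handlers are rejecting, assuming the standard HBase 2.x client API; the class name, retry settings, and cell value are illustrative, while the table, row, family, and qualifier mirror the keys visible in the log (TestAcidGuarantees, test_row_0, A:col10).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetrySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: RegionTooBusyException is retryable, so the client keeps
        // retrying the Mutate RPC with backoff, bounded by these settings and the call deadline.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100);   // base pause in ms between attempts
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          table.put(put);   // blocks through the retries while the region reports "too busy"
        }
      }
    }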
2024-11-28T09:21:17,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
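The failing pid=27 above is the per-region child of the FlushTableProcedure stored as pid=26: the region server answers "NOT flushing ... as already flushing", the callable fails with IOException, and the master re-dispatches it, which is why the same pid=27 execution reappears further down in this log. For context, here is a minimal sketch of the client call that starts this chain, assuming the standard Admin API (the test drives it through HBaseAdmin, as the "Operation: FLUSH ... procId: 24 completed" line shows); the class name and the use of a default configuration are purely illustrative.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = connection.getAdmin()) {
          // Synchronous flush of every region of the table; returns once the flush has been carried out.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }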
2024-11-28T09:21:17,694 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=396 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/edb235d5ad3c4e91a4df77416adb6ed4 2024-11-28T09:21:17,702 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/47c3fe778b884a88a31113570baa635e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e 2024-11-28T09:21:17,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e, entries=250, sequenceid=396, filesize=16.8 K 2024-11-28T09:21:17,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bf530c833d254ff0a594176b5ad86a62 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62 2024-11-28T09:21:17,717 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62, entries=150, sequenceid=396, filesize=12.0 K 2024-11-28T09:21:17,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/edb235d5ad3c4e91a4df77416adb6ed4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4 2024-11-28T09:21:17,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4, entries=150, sequenceid=396, filesize=12.0 K 2024-11-28T09:21:17,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9324112e51bee406916a385aca28ddff in 580ms, sequenceid=396, compaction requested=false 2024-11-28T09:21:17,728 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:17,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:21:17,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 
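A flush of only ~161 KB clearing a region that was rejecting writes at "Over memstore limit=512.0 K" is consistent with a deliberately small flush size in this test: the blocking threshold is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier, and writes are accepted again once the flush brings the memstore back under it. The rough sketch below shows that relationship with assumed values, since neither setting's actual value appears in this excerpt.

    public class MemstoreBlockingSketch {
      public static void main(String[] args) {
        long flushSize = 128L * 1024;       // hbase.hregion.memstore.flush.size, assumed small test value
        long blockMultiplier = 4;           // hbase.hregion.memstore.block.multiplier, HBase default
        long blockingSize = flushSize * blockMultiplier;
        System.out.println(blockingSize);   // 524288 bytes = 512.0 K, matching the limit in the WARN lines
      }
    }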
2024-11-28T09:21:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:17,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:17,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:17,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/1085bbdd55284d5e9d8a25e551c9ecca is 50, key is test_row_0/A:col10/1732785677779/Put/seqid=0 2024-11-28T09:21:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T09:21:17,809 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,809 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:17,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:17,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,810 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785737818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785737819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,823 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785737820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785737820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741923_1099 (size=14741) 2024-11-28T09:21:17,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785737924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,927 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785737925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,928 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785737925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785737932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,962 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:17,962 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:17,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:17,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:17,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:17,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:17,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785737972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T09:21:18,115 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,116 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:18,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:18,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:18,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785738130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,133 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785738130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,134 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785738133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785738137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/1085bbdd55284d5e9d8a25e551c9ecca 2024-11-28T09:21:18,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a9a00bd3ce94400699fa5df86dbed03d is 50, key is test_row_0/B:col10/1732785677779/Put/seqid=0 2024-11-28T09:21:18,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,270 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:18,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:18,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,271 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:18,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741924_1100 (size=12301) 2024-11-28T09:21:18,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a9a00bd3ce94400699fa5df86dbed03d 2024-11-28T09:21:18,301 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/bde5741138d947fcaa18b8875ecdb400 is 50, key is test_row_0/C:col10/1732785677779/Put/seqid=0 2024-11-28T09:21:18,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741925_1101 (size=12301) 2024-11-28T09:21:18,342 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/bde5741138d947fcaa18b8875ecdb400 2024-11-28T09:21:18,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/1085bbdd55284d5e9d8a25e551c9ecca as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca 2024-11-28T09:21:18,363 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca, entries=200, sequenceid=410, filesize=14.4 K 2024-11-28T09:21:18,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/a9a00bd3ce94400699fa5df86dbed03d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d 2024-11-28T09:21:18,372 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d, entries=150, sequenceid=410, filesize=12.0 K 2024-11-28T09:21:18,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/bde5741138d947fcaa18b8875ecdb400 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400 2024-11-28T09:21:18,382 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400, entries=150, sequenceid=410, filesize=12.0 K 2024-11-28T09:21:18,383 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9324112e51bee406916a385aca28ddff in 601ms, sequenceid=410, compaction requested=true 2024-11-28T09:21:18,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:18,384 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:18,384 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:18,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-28T09:21:18,385 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 45109 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:18,385 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:18,385 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,386 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/93ff1aad62b2484ebd241aafed147a6d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=44.1 K 2024-11-28T09:21:18,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:18,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:18,386 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:18,386 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a851c5d966874d6d85a332bf1252bafd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=36.9 K 2024-11-28T09:21:18,387 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93ff1aad62b2484ebd241aafed147a6d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:18,387 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a851c5d966874d6d85a332bf1252bafd, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:18,387 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47c3fe778b884a88a31113570baa635e, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732785676833 2024-11-28T09:21:18,388 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bf530c833d254ff0a594176b5ad86a62, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732785676835 2024-11-28T09:21:18,388 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1085bbdd55284d5e9d8a25e551c9ecca, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785677777 2024-11-28T09:21:18,388 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a9a00bd3ce94400699fa5df86dbed03d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785677779 2024-11-28T09:21:18,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:18,398 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:18,413 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#87 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:18,414 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/17c5677a3f6a406686a4d8b52c177684 is 50, key is test_row_0/B:col10/1732785677779/Put/seqid=0 2024-11-28T09:21:18,415 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#88 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:18,416 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/4a014196b8284b5f9565b760fb79f4a2 is 50, key is test_row_0/A:col10/1732785677779/Put/seqid=0 2024-11-28T09:21:18,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,424 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-28T09:21:18,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,425 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:21:18,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:18,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:18,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:18,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:18,440 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:18,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6579b96d8ff3449fb4dc8231d53c77c5 is 50, key is test_row_0/A:col10/1732785677818/Put/seqid=0 2024-11-28T09:21:18,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741927_1103 (size=13289) 2024-11-28T09:21:18,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741926_1102 (size=13289) 2024-11-28T09:21:18,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741928_1104 (size=12301) 2024-11-28T09:21:18,461 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785738456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,461 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6579b96d8ff3449fb4dc8231d53c77c5 2024-11-28T09:21:18,462 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785738457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785738458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,463 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785738458, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8e8805aafd78428b87670fa9bccf2b17 is 50, key is test_row_0/B:col10/1732785677818/Put/seqid=0 2024-11-28T09:21:18,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741929_1105 (size=12301) 2024-11-28T09:21:18,502 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8e8805aafd78428b87670fa9bccf2b17 2024-11-28T09:21:18,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/dd1b5b9788734180a010609ced4a5730 is 50, key is test_row_0/C:col10/1732785677818/Put/seqid=0 2024-11-28T09:21:18,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741930_1106 (size=12301) 2024-11-28T09:21:18,536 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=433 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/dd1b5b9788734180a010609ced4a5730 2024-11-28T09:21:18,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6579b96d8ff3449fb4dc8231d53c77c5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5 
2024-11-28T09:21:18,551 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5, entries=150, sequenceid=433, filesize=12.0 K 2024-11-28T09:21:18,553 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/8e8805aafd78428b87670fa9bccf2b17 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17 2024-11-28T09:21:18,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785738562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785738564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785738566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,567 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785738566, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,572 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17, entries=150, sequenceid=433, filesize=12.0 K 2024-11-28T09:21:18,574 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/dd1b5b9788734180a010609ced4a5730 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730 2024-11-28T09:21:18,585 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730, entries=150, sequenceid=433, filesize=12.0 K 2024-11-28T09:21:18,586 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=80.51 KB/82440 for 9324112e51bee406916a385aca28ddff in 161ms, sequenceid=433, compaction requested=true 2024-11-28T09:21:18,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status 
journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:18,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-28T09:21:18,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-28T09:21:18,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-28T09:21:18,594 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0930 sec 2024-11-28T09:21:18,596 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 1.1000 sec 2024-11-28T09:21:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-28T09:21:18,603 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-28T09:21:18,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:18,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-28T09:21:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:18,609 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:18,610 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:18,610 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:18,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:18,762 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,763 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-28T09:21:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,763 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:21:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:18,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:18,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:18,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:18,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:18,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e60a994fe16d40e088e7787f838c10cd is 50, key is test_row_0/A:col10/1732785678455/Put/seqid=0 2024-11-28T09:21:18,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741931_1107 (size=12301) 2024-11-28T09:21:18,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785738794, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,800 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785738796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,801 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785738798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,802 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785738801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,855 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/4a014196b8284b5f9565b760fb79f4a2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4a014196b8284b5f9565b760fb79f4a2 2024-11-28T09:21:18,865 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/17c5677a3f6a406686a4d8b52c177684 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/17c5677a3f6a406686a4d8b52c177684 2024-11-28T09:21:18,873 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into 17c5677a3f6a406686a4d8b52c177684(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
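The flush procedures in this log are client-initiated: HBaseAdmin reports "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed" and the master then stores a new FlushTableProcedure (pid=28) that fans out into a FlushRegionProcedure (pid=29) on the region server. A minimal sketch of issuing such a flush through the standard HBase Admin API follows, assuming default connection settings; only the table name is taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Assumes an hbase-site.xml on the classpath pointing at the cluster.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master
            // runs this as a FlushTableProcedure with one FlushRegionProcedure
            // per region, matching the pid=26/27 and pid=28/29 pairs in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}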
2024-11-28T09:21:18,873 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:18,873 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785678384; duration=0sec 2024-11-28T09:21:18,873 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:18,873 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:18,874 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:18,877 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:18,877 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:18,877 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:18,877 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6d11d81a377f4183a1e5ea5cb457ea2b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=48.9 K 2024-11-28T09:21:18,878 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 4a014196b8284b5f9565b760fb79f4a2(size=13.0 K), total size for store is 25.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
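The recurring "Over memstore limit=512.0 K" warnings come from HRegion.checkResources rejecting writes once a region's memstore passes its blocking size, which HBase derives from the per-region flush size and the block multiplier. A limit of 512.0 K suggests this test runs with a deliberately tiny flush size; the exact test settings are not visible in this log, so the sketch below only shows the relevant, real configuration keys with assumed values chosen to reproduce that limit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Per-region memstore flush threshold in bytes (cluster default is 128 MB).
        // 128 KB is an assumption picked so that flushSize * multiplier = 512 KB.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);

        // Multiplier applied to the flush size to get the blocking limit at which
        // HRegion.checkResources throws RegionTooBusyException.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block above ~" + (blockingLimit / 1024) + " K per region");
    }
}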
2024-11-28T09:21:18,878 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d11d81a377f4183a1e5ea5cb457ea2b, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=367, earliestPutTs=1732785676489 2024-11-28T09:21:18,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:18,878 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785678383; duration=0sec 2024-11-28T09:21:18,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:18,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:18,878 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting edb235d5ad3c4e91a4df77416adb6ed4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=396, earliestPutTs=1732785676835 2024-11-28T09:21:18,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bde5741138d947fcaa18b8875ecdb400, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785677779 2024-11-28T09:21:18,881 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting dd1b5b9788734180a010609ced4a5730, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732785677813 2024-11-28T09:21:18,899 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#93 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:18,900 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/58cde7fe1b6e49b99f02fc875426d0e7 is 50, key is test_row_0/C:col10/1732785677818/Put/seqid=0 2024-11-28T09:21:18,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785738900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785738903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785738903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,907 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785738903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:18,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:18,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741932_1108 (size=13323) 2024-11-28T09:21:18,927 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/58cde7fe1b6e49b99f02fc875426d0e7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/58cde7fe1b6e49b99f02fc875426d0e7 2024-11-28T09:21:18,936 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 58cde7fe1b6e49b99f02fc875426d0e7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:18,936 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:18,936 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=12, startTime=1732785678384; duration=0sec 2024-11-28T09:21:18,936 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:18,936 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:18,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:18,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785738982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,107 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785739107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785739107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785739108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785739109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,197 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e60a994fe16d40e088e7787f838c10cd 2024-11-28T09:21:19,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:19,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d6e6699dfe054747b7921a17ab21874b is 50, key is test_row_0/B:col10/1732785678455/Put/seqid=0 2024-11-28T09:21:19,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741933_1109 (size=12301) 2024-11-28T09:21:19,237 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d6e6699dfe054747b7921a17ab21874b 2024-11-28T09:21:19,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ae702beeaf71432598c8e39a2d37f6b2 is 50, key is test_row_0/C:col10/1732785678455/Put/seqid=0 2024-11-28T09:21:19,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741934_1110 (size=12301) 2024-11-28T09:21:19,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785739410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785739413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785739414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785739414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,661 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=448 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ae702beeaf71432598c8e39a2d37f6b2 2024-11-28T09:21:19,673 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/e60a994fe16d40e088e7787f838c10cd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd 2024-11-28T09:21:19,684 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd, entries=150, sequenceid=448, filesize=12.0 K 2024-11-28T09:21:19,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d6e6699dfe054747b7921a17ab21874b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b 2024-11-28T09:21:19,702 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b, entries=150, sequenceid=448, filesize=12.0 K 2024-11-28T09:21:19,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ae702beeaf71432598c8e39a2d37f6b2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2 2024-11-28T09:21:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:19,722 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2, entries=150, sequenceid=448, filesize=12.0 K 2024-11-28T09:21:19,725 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 9324112e51bee406916a385aca28ddff in 962ms, sequenceid=448, compaction requested=true 2024-11-28T09:21:19,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:19,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
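RegionTooBusyException is retryable for the standard HBase client, so the writers hitting the warnings above are expected to back off and retry until the in-flight flush (pid=29, completing in 962 ms at sequenceid=448) frees memstore space. A hypothetical client-side sketch using the stock Java API and its retry settings; the table name, row key, family A, and qualifier col10 are taken from the log, while the retry values and cell value are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetriedPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client-side retry knobs; a RegionTooBusyException response is retried
        // internally, with backoff, up to this many attempts.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100); // base pause between retries, in ms

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family A, qualifier col10 as in the flushed cells above; the value is arbitrary.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            table.put(put); // retries transparently until the region accepts the write
        }
    }
}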
2024-11-28T09:21:19,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-28T09:21:19,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-28T09:21:19,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-28T09:21:19,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1170 sec 2024-11-28T09:21:19,732 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.1260 sec 2024-11-28T09:21:19,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:19,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:19,920 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:19,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c1482be9eb234f2e9628c788106c0514 is 50, key is test_row_0/A:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:19,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785739935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785739939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785739941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:19,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785739944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:19,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741935_1111 (size=12301) 2024-11-28T09:21:20,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785740045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785740049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,050 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785740049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,051 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785740050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,255 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785740252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785740252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,258 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785740258, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785740261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,359 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c1482be9eb234f2e9628c788106c0514 2024-11-28T09:21:20,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2b9885531e9c47f8830f8c5901a30716 is 50, key is test_row_0/B:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:20,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741936_1112 (size=12301) 2024-11-28T09:21:20,559 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785740558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,559 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785740558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785740563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:20,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785740565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-28T09:21:20,714 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-28T09:21:20,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:20,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees 2024-11-28T09:21:20,720 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:20,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T09:21:20,721 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=30, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:20,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:20,781 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2b9885531e9c47f8830f8c5901a30716 2024-11-28T09:21:20,792 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/32cfa76c57b841f9a5e08d9d29c61afd is 50, key is test_row_0/C:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:20,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741937_1113 (size=12301) 
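The RegionTooBusyException warnings above are server-side write backpressure: while region 9324112e51bee406916a385aca28ddff holds more unflushed data than its blocking limit (memstore limit=512.0 K here, evidently a deliberately small test setting; with stock defaults the limit is hbase.hregion.memstore.flush.size, 128 MB, times hbase.hregion.memstore.block.multiplier, 4), incoming Mutate calls are rejected until the flush drains the memstore. A minimal client-side sketch of the retry this implies follows; the class name and backoff values are illustrative assumptions, and in practice the stock HBase client already retries these rejections internally (they may surface wrapped in a RetriesExhaustedWithDetailsException rather than directly). Only the table, row, family and qualifier values are taken from the log.

// Minimal sketch (not from the test source): a client write retried on RegionTooBusyException.
// Backoff policy and class name are assumptions for illustration.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureRetryExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);            // rejected with RegionTooBusyException while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);   // back off so the in-flight flush can drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5000L);
        }
      }
    }
  }
}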
2024-11-28T09:21:20,810 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/32cfa76c57b841f9a5e08d9d29c61afd 2024-11-28T09:21:20,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/c1482be9eb234f2e9628c788106c0514 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514 2024-11-28T09:21:20,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T09:21:20,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:21:20,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2b9885531e9c47f8830f8c5901a30716 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716 2024-11-28T09:21:20,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:21:20,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/32cfa76c57b841f9a5e08d9d29c61afd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd 2024-11-28T09:21:20,849 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:21:20,850 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=73.80 KB/75570 for 9324112e51bee406916a385aca28ddff in 931ms, sequenceid=474, compaction requested=true 2024-11-28T09:21:20,850 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store 
size is 1 2024-11-28T09:21:20,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:20,851 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:20,851 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:20,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:20,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:20,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:20,853 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:20,854 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:20,854 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:20,854 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
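Once the flush commits, the flusher queues compaction checks for stores A, B and C, and the compaction policy selects the four eligible HFiles in each. Whether such a check selects anything is governed by standard store-file thresholds; below is a small sketch of reading them, assuming stock configuration rather than whatever this test overrides. The key names are standard hbase-default.xml properties; the fallback values shown are the usual defaults.

// Minimal sketch (assumes stock configuration): the thresholds behind the
// "Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking" lines above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);       // minimum eligible files before a minor compaction
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);      // upper bound on files merged in one compaction
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" figure in the selection log
    System.out.println("minor compaction when >= " + minFiles + " files, merging at most " + maxFiles
        + "; writes block at " + blocking + " store files");
  }
}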
2024-11-28T09:21:20,855 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:20,855 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/17c5677a3f6a406686a4d8b52c177684, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=49.0 K 2024-11-28T09:21:20,855 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:20,855 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:20,855 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4a014196b8284b5f9565b760fb79f4a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=49.0 K 2024-11-28T09:21:20,856 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4a014196b8284b5f9565b760fb79f4a2, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785677779 2024-11-28T09:21:20,856 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 17c5677a3f6a406686a4d8b52c177684, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785677779 2024-11-28T09:21:20,856 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6579b96d8ff3449fb4dc8231d53c77c5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, 
earliestPutTs=1732785677813 2024-11-28T09:21:20,856 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e8805aafd78428b87670fa9bccf2b17, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732785677813 2024-11-28T09:21:20,857 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e60a994fe16d40e088e7787f838c10cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732785678448 2024-11-28T09:21:20,857 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d6e6699dfe054747b7921a17ab21874b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732785678448 2024-11-28T09:21:20,858 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c1482be9eb234f2e9628c788106c0514, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:20,858 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b9885531e9c47f8830f8c5901a30716, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:20,874 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:20,875 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=31 2024-11-28T09:21:20,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
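The FlushRegionCallable above (pid=31) is the region-server half of the client-requested flush that the master stored as FlushTableProcedure pid=30. The test driver issues such requests through the standard admin flush call; a minimal sketch of that request under default client settings, with only the table name taken from the log:

// Minimal sketch (assumed, not the test's own code): the admin flush request that produces
// the FlushTableProcedure / FlushRegionProcedure pair seen in the log.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Waits for the master to report the flush procedure done, matching the earlier
      // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed" pattern.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}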
2024-11-28T09:21:20,875 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:20,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:20,877 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#99 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:20,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/298800e036424fcb885fa32e920fafdd is 50, key is test_row_0/A:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:20,890 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#100 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:20,891 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/84e204be4e244a6f9cbd083493f39651 is 50, key is test_row_0/B:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:20,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/07cbd6c24c2d43cca9a6ac1e26585fe8 is 50, key is test_row_0/A:col10/1732785679937/Put/seqid=0 2024-11-28T09:21:20,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741938_1114 (size=13425) 2024-11-28T09:21:20,933 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/298800e036424fcb885fa32e920fafdd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/298800e036424fcb885fa32e920fafdd 2024-11-28T09:21:20,942 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 298800e036424fcb885fa32e920fafdd(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
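With the compacted file committed, store A is back to a single 13.1 K HFile. From a client, progress of compactions like these can be observed by polling the admin compaction state; a minimal sketch under the assumption of the HBase 2.x client API, with only the table name taken from the log:

// Minimal sketch (assumption, not from the test): polling whether compactions are still
// running on the table whose stores were just compacted above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionStateProbe {
  public static void main(String[] args) throws Exception {
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      CompactionState state = admin.getCompactionState(tn);
      while (state != CompactionState.NONE) {   // NONE means no compaction currently running
        Thread.sleep(500);
        state = admin.getCompactionState(tn);
      }
      System.out.println("No compaction running on " + tn);
    }
  }
}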
2024-11-28T09:21:20,942 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:20,942 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=12, startTime=1732785680850; duration=0sec 2024-11-28T09:21:20,942 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:20,943 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:20,944 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:20,948 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:20,948 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:20,949 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:20,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741939_1115 (size=13425) 2024-11-28T09:21:20,949 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/58cde7fe1b6e49b99f02fc875426d0e7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.0 K 2024-11-28T09:21:20,949 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58cde7fe1b6e49b99f02fc875426d0e7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=433, earliestPutTs=1732785677813 2024-11-28T09:21:20,950 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ae702beeaf71432598c8e39a2d37f6b2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=448, earliestPutTs=1732785678448 2024-11-28T09:21:20,950 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 32cfa76c57b841f9a5e08d9d29c61afd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:20,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741940_1116 (size=12301) 2024-11-28T09:21:20,957 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/07cbd6c24c2d43cca9a6ac1e26585fe8 2024-11-28T09:21:20,970 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#102 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:20,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/e8999256f9f74ca991de0903b4f95e06 is 50, key is test_row_0/B:col10/1732785679937/Put/seqid=0 2024-11-28T09:21:20,971 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/54329f7cec6f48fb9f5aea20bd77f431 is 50, key is test_row_0/C:col10/1732785678794/Put/seqid=0 2024-11-28T09:21:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741941_1117 (size=12301) 2024-11-28T09:21:20,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741942_1118 (size=13425) 2024-11-28T09:21:20,990 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/e8999256f9f74ca991de0903b4f95e06 2024-11-28T09:21:21,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ba75a0c09a594f1f92ca29c944c37832 is 50, key is test_row_0/C:col10/1732785679937/Put/seqid=0 2024-11-28T09:21:21,007 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
as already flushing 2024-11-28T09:21:21,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:21,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741943_1119 (size=12301) 2024-11-28T09:21:21,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T09:21:21,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,063 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785741062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,064 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785741062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785741067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,070 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785741068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,155 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T09:21:21,358 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/84e204be4e244a6f9cbd083493f39651 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/84e204be4e244a6f9cbd083493f39651 2024-11-28T09:21:21,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,364 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into 84e204be4e244a6f9cbd083493f39651(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:21,365 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:21,365 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=12, startTime=1732785680851; duration=0sec 2024-11-28T09:21:21,365 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:21,365 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:21,396 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/54329f7cec6f48fb9f5aea20bd77f431 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/54329f7cec6f48fb9f5aea20bd77f431 2024-11-28T09:21:21,406 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 54329f7cec6f48fb9f5aea20bd77f431(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:21,407 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:21,407 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785680853; duration=0sec 2024-11-28T09:21:21,407 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:21,407 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:21,408 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ba75a0c09a594f1f92ca29c944c37832 2024-11-28T09:21:21,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/07cbd6c24c2d43cca9a6ac1e26585fe8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8 2024-11-28T09:21:21,423 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8, entries=150, sequenceid=488, filesize=12.0 K 2024-11-28T09:21:21,425 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/e8999256f9f74ca991de0903b4f95e06 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06 2024-11-28T09:21:21,432 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06, entries=150, sequenceid=488, filesize=12.0 K 2024-11-28T09:21:21,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/ba75a0c09a594f1f92ca29c944c37832 
as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832 2024-11-28T09:21:21,439 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832, entries=150, sequenceid=488, filesize=12.0 K 2024-11-28T09:21:21,440 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 9324112e51bee406916a385aca28ddff in 565ms, sequenceid=488, compaction requested=false 2024-11-28T09:21:21,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:21,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:21,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=31}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=31 2024-11-28T09:21:21,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=31 2024-11-28T09:21:21,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-28T09:21:21,447 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 721 msec 2024-11-28T09:21:21,449 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=30, table=TestAcidGuarantees in 730 msec 2024-11-28T09:21:21,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:21,666 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:21:21,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:21,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:21,666 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:21,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:21,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:21,667 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:21,672 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/b7c4d06c13634c2ea1c6ec143e3f8511 is 50, key is test_row_0/A:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:21,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741944_1120 (size=14741) 2024-11-28T09:21:21,689 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-28T09:21:21,824 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-28T09:21:21,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:21,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees 2024-11-28T09:21:21,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T09:21:21,828 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:21,829 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=32, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:21,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:21,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T09:21:21,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:21,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-28T09:21:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:21,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:21,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:21,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:21,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:21,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785741997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785742065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785742073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,075 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785742075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785742075, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=514 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/b7c4d06c13634c2ea1c6ec143e3f8511 2024-11-28T09:21:22,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2e50387421eb4d25b53a14acd7a00990 is 50, key is test_row_0/B:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:22,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741945_1121 (size=12301) 2024-11-28T09:21:22,093 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=514 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2e50387421eb4d25b53a14acd7a00990 2024-11-28T09:21:22,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/23dd942bd20643688eb7dea534288d7c is 50, key is test_row_0/C:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:22,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741946_1122 (size=12301) 2024-11-28T09:21:22,109 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=514 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/23dd942bd20643688eb7dea534288d7c 2024-11-28T09:21:22,117 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/b7c4d06c13634c2ea1c6ec143e3f8511 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511 2024-11-28T09:21:22,123 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511, entries=200, sequenceid=514, filesize=14.4 K 2024-11-28T09:21:22,126 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/2e50387421eb4d25b53a14acd7a00990 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990 2024-11-28T09:21:22,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T09:21:22,133 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990, entries=150, sequenceid=514, filesize=12.0 K 2024-11-28T09:21:22,134 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,134 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-28T09:21:22,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:22,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:22,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:22,135 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] handler.RSProcedureHandler(58): pid=33 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:22,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/23dd942bd20643688eb7dea534288d7c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c 2024-11-28T09:21:22,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=33 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:22,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=33 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:22,141 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c, entries=150, sequenceid=514, filesize=12.0 K 2024-11-28T09:21:22,142 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 9324112e51bee406916a385aca28ddff in 476ms, sequenceid=514, compaction requested=true 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:22,142 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:22,142 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:22,143 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:22,144 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:22,144 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:22,144 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:22,145 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40467 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:22,145 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:22,145 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:22,145 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/298800e036424fcb885fa32e920fafdd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=39.5 K 2024-11-28T09:21:22,145 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/84e204be4e244a6f9cbd083493f39651, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.1 K 2024-11-28T09:21:22,146 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 84e204be4e244a6f9cbd083493f39651, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:22,146 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 298800e036424fcb885fa32e920fafdd, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:22,146 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e8999256f9f74ca991de0903b4f95e06, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732785679937 2024-11-28T09:21:22,147 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07cbd6c24c2d43cca9a6ac1e26585fe8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732785679937 2024-11-28T09:21:22,147 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 2e50387421eb4d25b53a14acd7a00990, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:22,147 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7c4d06c13634c2ea1c6ec143e3f8511, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:22,162 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#108 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:22,163 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/87bbb873772045dda23ae48717b62019 is 50, key is test_row_0/A:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:22,166 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#109 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:22,166 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/0d345da904c74eaf8d8ad0733eeed7f4 is 50, key is test_row_0/B:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:22,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741947_1123 (size=13527) 2024-11-28T09:21:22,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741948_1124 (size=13527) 2024-11-28T09:21:22,203 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/0d345da904c74eaf8d8ad0733eeed7f4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/0d345da904c74eaf8d8ad0733eeed7f4 2024-11-28T09:21:22,207 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/87bbb873772045dda23ae48717b62019 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/87bbb873772045dda23ae48717b62019 2024-11-28T09:21:22,215 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into 0d345da904c74eaf8d8ad0733eeed7f4(size=13.2 K), total size for store is 13.2 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:22,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:22,215 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785682142; duration=0sec 2024-11-28T09:21:22,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:22,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:22,215 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:22,218 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38027 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:22,218 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:22,218 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:22,219 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/54329f7cec6f48fb9f5aea20bd77f431, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.1 K 2024-11-28T09:21:22,219 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 54329f7cec6f48fb9f5aea20bd77f431, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785678794 2024-11-28T09:21:22,220 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 87bbb873772045dda23ae48717b62019(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:22,220 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:22,220 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785682142; duration=0sec 2024-11-28T09:21:22,220 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ba75a0c09a594f1f92ca29c944c37832, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=488, earliestPutTs=1732785679937 2024-11-28T09:21:22,220 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:22,220 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:22,220 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 23dd942bd20643688eb7dea534288d7c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:22,234 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#110 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:22,235 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1d8805a4ed2f493ebc8709a5aee320e4 is 50, key is test_row_0/C:col10/1732785681050/Put/seqid=0 2024-11-28T09:21:22,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741949_1125 (size=13527) 2024-11-28T09:21:22,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=33 2024-11-28T09:21:22,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:22,289 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:22,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:22,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/de4b4ba574ca4d099bb65b96b0bbc19d is 50, key is test_row_0/A:col10/1732785681686/Put/seqid=0 2024-11-28T09:21:22,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:22,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
as already flushing 2024-11-28T09:21:22,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741950_1126 (size=12301) 2024-11-28T09:21:22,314 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/de4b4ba574ca4d099bb65b96b0bbc19d 2024-11-28T09:21:22,324 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/cba26a53c5b64817a6cdd017cde34e4b is 50, key is test_row_0/B:col10/1732785681686/Put/seqid=0 2024-11-28T09:21:22,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741951_1127 (size=12301) 2024-11-28T09:21:22,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785742364, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T09:21:22,471 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785742469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,661 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1d8805a4ed2f493ebc8709a5aee320e4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1d8805a4ed2f493ebc8709a5aee320e4 2024-11-28T09:21:22,667 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 1d8805a4ed2f493ebc8709a5aee320e4(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:22,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:22,667 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785682142; duration=0sec 2024-11-28T09:21:22,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:22,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:22,674 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:22,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785742673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:22,742 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/cba26a53c5b64817a6cdd017cde34e4b 2024-11-28T09:21:22,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/6ea2ffa7488747618faedbd0335356d1 is 50, key is test_row_0/C:col10/1732785681686/Put/seqid=0 2024-11-28T09:21:22,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741952_1128 (size=12301) 2024-11-28T09:21:22,759 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=529 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/6ea2ffa7488747618faedbd0335356d1 2024-11-28T09:21:22,766 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/de4b4ba574ca4d099bb65b96b0bbc19d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d 2024-11-28T09:21:22,773 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d, entries=150, sequenceid=529, filesize=12.0 K 2024-11-28T09:21:22,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/cba26a53c5b64817a6cdd017cde34e4b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b 2024-11-28T09:21:22,785 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b, entries=150, sequenceid=529, filesize=12.0 K 2024-11-28T09:21:22,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/6ea2ffa7488747618faedbd0335356d1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1 2024-11-28T09:21:22,796 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1, entries=150, sequenceid=529, filesize=12.0 K 2024-11-28T09:21:22,797 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 9324112e51bee406916a385aca28ddff in 508ms, sequenceid=529, compaction requested=false 2024-11-28T09:21:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:22,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=33}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=33 2024-11-28T09:21:22,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=33 2024-11-28T09:21:22,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-28T09:21:22,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 969 msec 2024-11-28T09:21:22,802 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=32, table=TestAcidGuarantees in 975 msec 2024-11-28T09:21:22,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=32 2024-11-28T09:21:22,931 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 32 completed 2024-11-28T09:21:22,932 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:22,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees 2024-11-28T09:21:22,934 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:22,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:22,935 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=34, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:22,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:22,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:22,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:21:22,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-28T09:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:22,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:22,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2f6e987e3f8b4b54afafcae04115f8dd is 50, key is test_row_0/A:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:22,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741953_1129 (size=12301) 2024-11-28T09:21:23,006 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:23,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785743002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:23,086 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:23,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:23,087 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,087 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:23,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785743107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:23,240 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:23,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:23,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,315 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:23,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785743315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,393 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,394 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:23,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:23,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2f6e987e3f8b4b54afafcae04115f8dd 2024-11-28T09:21:23,394 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] 
at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,408 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d1ac739f65784be38387bb5fc2898bb4 is 50, key is test_row_0/B:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:23,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741954_1130 (size=12301) 2024-11-28T09:21:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:23,547 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
as already flushing 2024-11-28T09:21:23,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,548 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:23,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785743619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,700 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:23,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:23,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,815 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d1ac739f65784be38387bb5fc2898bb4 2024-11-28T09:21:23,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e4fbaa3ca2f4471db8dc48450cda3c08 is 50, key is test_row_0/C:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:23,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741955_1131 (size=12301) 2024-11-28T09:21:23,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e4fbaa3ca2f4471db8dc48450cda3c08 2024-11-28T09:21:23,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/2f6e987e3f8b4b54afafcae04115f8dd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd 2024-11-28T09:21:23,846 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:21:23,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/d1ac739f65784be38387bb5fc2898bb4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4 2024-11-28T09:21:23,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:21:23,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/e4fbaa3ca2f4471db8dc48450cda3c08 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08 2024-11-28T09:21:23,854 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:23,855 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:23,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:23,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,855 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] handler.RSProcedureHandler(58): pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=35 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:23,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=35 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:23,858 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:21:23,860 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 9324112e51bee406916a385aca28ddff in 880ms, sequenceid=554, compaction requested=true 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:23,860 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:23,860 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:23,860 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:23,862 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:23,862 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:23,862 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:23,862 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:23,862 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:23,862 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,862 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/0d345da904c74eaf8d8ad0733eeed7f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.2 K 2024-11-28T09:21:23,862 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/87bbb873772045dda23ae48717b62019, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.2 K 2024-11-28T09:21:23,863 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0d345da904c74eaf8d8ad0733eeed7f4, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:23,863 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 87bbb873772045dda23ae48717b62019, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:23,863 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cba26a53c5b64817a6cdd017cde34e4b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732785681672 2024-11-28T09:21:23,864 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d1ac739f65784be38387bb5fc2898bb4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 2024-11-28T09:21:23,864 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting de4b4ba574ca4d099bb65b96b0bbc19d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732785681672 2024-11-28T09:21:23,866 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f6e987e3f8b4b54afafcae04115f8dd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 
2024-11-28T09:21:23,876 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#117 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:23,877 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bca48b4b5b0548d28e2ddd330218a98b is 50, key is test_row_0/B:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:23,878 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#118 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:23,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/0ed7a2e236be420d9251d50ecd06b945 is 50, key is test_row_0/A:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:23,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741956_1132 (size=13629) 2024-11-28T09:21:23,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741957_1133 (size=13629) 2024-11-28T09:21:23,907 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/0ed7a2e236be420d9251d50ecd06b945 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/0ed7a2e236be420d9251d50ecd06b945 2024-11-28T09:21:23,911 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/bca48b4b5b0548d28e2ddd330218a98b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bca48b4b5b0548d28e2ddd330218a98b 2024-11-28T09:21:23,916 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 0ed7a2e236be420d9251d50ecd06b945(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:23,917 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:23,917 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785683860; duration=0sec 2024-11-28T09:21:23,918 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:23,918 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:23,918 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:23,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38129 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:23,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:23,920 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:23,920 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1d8805a4ed2f493ebc8709a5aee320e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.2 K 2024-11-28T09:21:23,921 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1d8805a4ed2f493ebc8709a5aee320e4, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=514, earliestPutTs=1732785681042 2024-11-28T09:21:23,921 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into bca48b4b5b0548d28e2ddd330218a98b(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:23,921 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:23,921 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785683860; duration=0sec 2024-11-28T09:21:23,921 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:23,921 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:23,922 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6ea2ffa7488747618faedbd0335356d1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=529, earliestPutTs=1732785681672 2024-11-28T09:21:23,922 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e4fbaa3ca2f4471db8dc48450cda3c08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 2024-11-28T09:21:23,934 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#119 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:23,935 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/175c62654f9f4e99ab1927a25381a5c4 is 50, key is test_row_0/C:col10/1732785682979/Put/seqid=0 2024-11-28T09:21:23,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741958_1134 (size=13629) 2024-11-28T09:21:23,980 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/175c62654f9f4e99ab1927a25381a5c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/175c62654f9f4e99ab1927a25381a5c4 2024-11-28T09:21:23,991 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 175c62654f9f4e99ab1927a25381a5c4(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:23,992 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:23,992 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785683860; duration=0sec 2024-11-28T09:21:23,992 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:23,992 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:24,008 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,008 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=35 2024-11-28T09:21:24,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:24,009 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:21:24,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:24,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:24,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:24,009 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:24,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:24,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7d8941db1ebd40d68b4d6be2deeaec6e is 50, key is test_row_0/A:col10/1732785682999/Put/seqid=0 2024-11-28T09:21:24,037 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741959_1135 (size=12301) 2024-11-28T09:21:24,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:24,038 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7d8941db1ebd40d68b4d6be2deeaec6e 2024-11-28T09:21:24,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/41f02988a58d43bdb6eeca37b302114a is 50, key is test_row_0/B:col10/1732785682999/Put/seqid=0 2024-11-28T09:21:24,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:24,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:24,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741960_1136 (size=12301) 2024-11-28T09:21:24,091 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/41f02988a58d43bdb6eeca37b302114a 2024-11-28T09:21:24,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1fd7e8af29ec46219be65a9f70a134b8 is 50, key is test_row_0/C:col10/1732785682999/Put/seqid=0 2024-11-28T09:21:24,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741961_1137 (size=12301) 2024-11-28T09:21:24,117 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785744114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,118 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785744115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785744117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,120 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785744118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785744125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785744218, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,221 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785744220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785744220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785744221, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,423 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785744422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,423 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785744422, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,425 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785744423, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785744425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,512 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1fd7e8af29ec46219be65a9f70a134b8 2024-11-28T09:21:24,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/7d8941db1ebd40d68b4d6be2deeaec6e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e 2024-11-28T09:21:24,523 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e, entries=150, sequenceid=569, filesize=12.0 K 2024-11-28T09:21:24,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/41f02988a58d43bdb6eeca37b302114a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a 2024-11-28T09:21:24,529 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a, entries=150, sequenceid=569, filesize=12.0 K
2024-11-28T09:21:24,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/1fd7e8af29ec46219be65a9f70a134b8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8
2024-11-28T09:21:24,536 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8, entries=150, sequenceid=569, filesize=12.0 K
2024-11-28T09:21:24,539 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 9324112e51bee406916a385aca28ddff in 530ms, sequenceid=569, compaction requested=false
2024-11-28T09:21:24,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff:
2024-11-28T09:21:24,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.
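The flush above drains the memstore while concurrent writes keep failing with RegionTooBusyException: Over memstore limit=512.0 K. That blocking limit is the product of two standard HBase settings, hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier; the sketch below is illustrative only, with assumed values chosen to yield the 512 K limit seen in this log (the test's real configuration is not part of the log).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        // Illustrative values only: flush.size * block.multiplier = 128 KB * 4 = 512 KB,
        // matching the "Over memstore limit=512.0 K" messages in this log.
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // trigger a region flush at 128 KB
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block new writes at 4x that size
            return conf;
        }
    }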
2024-11-28T09:21:24,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=35}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=35
2024-11-28T09:21:24,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=35
2024-11-28T09:21:24,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=35, resume processing ppid=34
2024-11-28T09:21:24,543 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, ppid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6060 sec
2024-11-28T09:21:24,545 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=34, table=TestAcidGuarantees in 1.6120 sec
2024-11-28T09:21:24,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff
2024-11-28T09:21:24,727 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-28T09:21:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A
2024-11-28T09:21:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B
2024-11-28T09:21:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:24,729 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C
2024-11-28T09:21:24,730 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:24,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785744734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785744734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785744736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,740 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785744737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,740 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6b2d91ed121844be96a4c2c5328e813b is 50, key is test_row_0/A:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:24,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741962_1138 (size=14741) 2024-11-28T09:21:24,760 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6b2d91ed121844be96a4c2c5328e813b 2024-11-28T09:21:24,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/467d7d71be85480fb6cbb77406b8c3e1 is 50, key is test_row_0/B:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:24,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741963_1139 (size=12301) 2024-11-28T09:21:24,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785744839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,840 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785744839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785744840, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:24,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785744842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-28T09:21:25,038 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-28T09:21:25,040 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:25,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees 2024-11-28T09:21:25,042 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:25,043 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=36, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:25,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:25,044 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785745043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,045 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785745043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,047 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785745047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785745047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58956 deadline: 1732785745136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:25,198 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-28T09:21:25,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:25,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,199 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,204 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/467d7d71be85480fb6cbb77406b8c3e1 2024-11-28T09:21:25,214 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/b08f0729c2c24571afc5d7b423f94cad is 50, key is test_row_0/C:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:25,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741964_1140 (size=12301) 2024-11-28T09:21:25,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:25,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785745347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,348 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785745347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,350 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785745348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,351 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,353 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-28T09:21:25,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:25,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,353 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,353 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785745353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:25,505 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,506 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-28T09:21:25,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:25,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,506 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] handler.RSProcedureHandler(58): pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:25,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=37 java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:25,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=37 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:25,628 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=596 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/b08f0729c2c24571afc5d7b423f94cad 2024-11-28T09:21:25,633 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/6b2d91ed121844be96a4c2c5328e813b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b 2024-11-28T09:21:25,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b, entries=200, sequenceid=596, filesize=14.4 K 2024-11-28T09:21:25,639 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/467d7d71be85480fb6cbb77406b8c3e1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1 2024-11-28T09:21:25,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:25,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1, entries=150, sequenceid=596, filesize=12.0 K 2024-11-28T09:21:25,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/b08f0729c2c24571afc5d7b423f94cad as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad 2024-11-28T09:21:25,655 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad, entries=150, sequenceid=596, filesize=12.0 K 2024-11-28T09:21:25,657 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 9324112e51bee406916a385aca28ddff in 931ms, sequenceid=596, compaction requested=true 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:25,657 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:25,657 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:25,658 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:25,659 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,659 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40671 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:25,659 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:25,659 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:25,660 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/0ed7a2e236be420d9251d50ecd06b945, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=39.7 K 2024-11-28T09:21:25,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=37 2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:25,660 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-28T09:21:25,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:25,660 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:25,660 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bca48b4b5b0548d28e2ddd330218a98b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.3 K 2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:25,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:25,660 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ed7a2e236be420d9251d50ecd06b945, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 2024-11-28T09:21:25,661 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:25,661 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bca48b4b5b0548d28e2ddd330218a98b, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 2024-11-28T09:21:25,662 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 41f02988a58d43bdb6eeca37b302114a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732785682992 2024-11-28T09:21:25,662 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7d8941db1ebd40d68b4d6be2deeaec6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732785682992 2024-11-28T09:21:25,663 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 467d7d71be85480fb6cbb77406b8c3e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:25,663 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 6b2d91ed121844be96a4c2c5328e813b, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:25,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/805595b702b443508104f73ff6b7a192 is 50, key is test_row_0/A:col10/1732785684730/Put/seqid=0 2024-11-28T09:21:25,676 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#127 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:25,677 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c19b38e83b0748c18cb1158e235c9ad0 is 50, key is test_row_0/B:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:25,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741965_1141 (size=12301) 2024-11-28T09:21:25,681 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/805595b702b443508104f73ff6b7a192 2024-11-28T09:21:25,688 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#128 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:25,689 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/8889dd9955c14e75b50f51169ca9da4b is 50, key is test_row_0/A:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:25,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 is 50, key is test_row_0/B:col10/1732785684730/Put/seqid=0 2024-11-28T09:21:25,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741966_1142 (size=13731) 2024-11-28T09:21:25,702 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c19b38e83b0748c18cb1158e235c9ad0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c19b38e83b0748c18cb1158e235c9ad0 2024-11-28T09:21:25,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741967_1143 (size=13731) 2024-11-28T09:21:25,708 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into c19b38e83b0748c18cb1158e235c9ad0(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:25,708 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:25,708 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785685657; duration=0sec 2024-11-28T09:21:25,708 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:25,708 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:25,708 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:25,710 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38231 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:25,710 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:25,710 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:25,710 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/175c62654f9f4e99ab1927a25381a5c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.3 K 2024-11-28T09:21:25,710 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 175c62654f9f4e99ab1927a25381a5c4, keycount=150, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=554, earliestPutTs=1732785682351 2024-11-28T09:21:25,711 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1fd7e8af29ec46219be65a9f70a134b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=569, earliestPutTs=1732785682992 2024-11-28T09:21:25,711 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b08f0729c2c24571afc5d7b423f94cad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:25,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 
is added to blk_1073741968_1144 (size=12301) 2024-11-28T09:21:25,723 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#130 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:25,724 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c855f330c9914d16b2558fb9c67b7642 is 50, key is test_row_0/C:col10/1732785684726/Put/seqid=0 2024-11-28T09:21:25,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741969_1145 (size=13731) 2024-11-28T09:21:25,852 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. as already flushing 2024-11-28T09:21:25,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:25,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785745874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785745875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785745875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785745876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785745978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785745978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785745981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:25,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:25,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785745981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,116 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/8889dd9955c14e75b50f51169ca9da4b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/8889dd9955c14e75b50f51169ca9da4b 2024-11-28T09:21:26,116 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 2024-11-28T09:21:26,123 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into 8889dd9955c14e75b50f51169ca9da4b(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:26,123 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:26,124 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785685657; duration=0sec 2024-11-28T09:21:26,124 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:26,124 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:26,137 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/c855f330c9914d16b2558fb9c67b7642 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c855f330c9914d16b2558fb9c67b7642 2024-11-28T09:21:26,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5039dd90b70f4627a0f9ec5cf39f4182 is 50, key is test_row_0/C:col10/1732785684730/Put/seqid=0 2024-11-28T09:21:26,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:26,146 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into c855f330c9914d16b2558fb9c67b7642(size=13.4 K), total size for store is 13.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:26,147 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:26,147 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785685657; duration=0sec 2024-11-28T09:21:26,147 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:26,147 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:26,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741970_1146 (size=12301) 2024-11-28T09:21:26,162 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=605 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5039dd90b70f4627a0f9ec5cf39f4182 2024-11-28T09:21:26,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/805595b702b443508104f73ff6b7a192 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192 2024-11-28T09:21:26,180 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T09:21:26,182 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785746181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 2024-11-28T09:21:26,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785746182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785746184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785746185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,188 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T09:21:26,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/5039dd90b70f4627a0f9ec5cf39f4182 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182 2024-11-28T09:21:26,195 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182, entries=150, sequenceid=605, filesize=12.0 K 2024-11-28T09:21:26,196 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=161.02 KB/164880 for 9324112e51bee406916a385aca28ddff in 536ms, sequenceid=605, compaction requested=false 2024-11-28T09:21:26,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:26,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
2024-11-28T09:21:26,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-11-28T09:21:26,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-11-28T09:21:26,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-28T09:21:26,199 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1540 sec 2024-11-28T09:21:26,201 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=36, table=TestAcidGuarantees in 1.1600 sec 2024-11-28T09:21:26,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:26,488 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-28T09:21:26,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:26,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:26,488 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:26,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:26,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:26,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:26,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ba432f88642f438e80655d15919e4a84 is 50, key is test_row_0/A:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:26,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785746495, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785746501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741971_1147 (size=14741) 2024-11-28T09:21:26,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785746501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,505 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785746502, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,506 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=636 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ba432f88642f438e80655d15919e4a84 2024-11-28T09:21:26,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ed6a5a490dd74e379d48e95c0342d4db is 50, key is test_row_0/B:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:26,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741972_1148 (size=12301) 2024-11-28T09:21:26,539 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=636 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ed6a5a490dd74e379d48e95c0342d4db 2024-11-28T09:21:26,552 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/05a77c77318342619b315711fecc20a3 is 50, key is test_row_0/C:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:26,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741973_1149 (size=12301) 2024-11-28T09:21:26,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785746603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,607 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785746606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,607 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785746606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,608 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785746606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,685 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b5f27aa to 127.0.0.1:53251 2024-11-28T09:21:26,685 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:26,687 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d2a8e08 to 127.0.0.1:53251 2024-11-28T09:21:26,688 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:26,688 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x295cb1ac to 127.0.0.1:53251 2024-11-28T09:21:26,688 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:26,688 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x490457fd to 127.0.0.1:53251 2024-11-28T09:21:26,688 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:26,806 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58952 deadline: 1732785746805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58928 deadline: 1732785746808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58910 deadline: 1732785746809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,809 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:26,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:58950 deadline: 1732785746809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:26,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=636 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/05a77c77318342619b315711fecc20a3 2024-11-28T09:21:26,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/ba432f88642f438e80655d15919e4a84 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84 2024-11-28T09:21:26,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84, entries=200, sequenceid=636, filesize=14.4 K 2024-11-28T09:21:26,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/ed6a5a490dd74e379d48e95c0342d4db as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db 2024-11-28T09:21:26,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db, entries=150, sequenceid=636, filesize=12.0 K 2024-11-28T09:21:26,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/05a77c77318342619b315711fecc20a3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3 2024-11-28T09:21:27,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3, entries=150, sequenceid=636, filesize=12.0 K 2024-11-28T09:21:27,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 9324112e51bee406916a385aca28ddff in 516ms, sequenceid=636, compaction requested=true 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:27,005 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:27,005 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9324112e51bee406916a385aca28ddff:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:27,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:27,006 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:27,006 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40773 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:27,006 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/B is initiating minor compaction (all files) 2024-11-28T09:21:27,006 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/A is initiating minor compaction (all files) 2024-11-28T09:21:27,006 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/B in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
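The flush/rejection cycle in this run is driven by the memstore sizing knobs: a region flushes once its memstore reaches hbase.hregion.memstore.flush.size and blocks new updates once it exceeds that size times hbase.hregion.memstore.block.multiplier. A hedged configuration sketch; the 128 KB / 4 values are inferred from the 512.0 K blocking limit reported above, not taken from the test source:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreSizingExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches this many bytes
    // (128 KB here, far below the production default, to force frequent flushes).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    // Block new updates once the memstore grows past flush.size * block.multiplier;
    // 128 KB * 4 = 512 KB would match the "Over memstore limit=512.0 K" rejections above.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
  }
}
```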
2024-11-28T09:21:27,006 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/A in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:27,006 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/8889dd9955c14e75b50f51169ca9da4b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=39.8 K 2024-11-28T09:21:27,006 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c19b38e83b0748c18cb1158e235c9ad0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.4 K 2024-11-28T09:21:27,007 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c19b38e83b0748c18cb1158e235c9ad0, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:27,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8889dd9955c14e75b50f51169ca9da4b, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:27,007 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c3ce3fdba0c84f8781ffea3e20e4f2d0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732785684730 2024-11-28T09:21:27,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 805595b702b443508104f73ff6b7a192, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732785684730 2024-11-28T09:21:27,007 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ed6a5a490dd74e379d48e95c0342d4db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=636, earliestPutTs=1732785685873 2024-11-28T09:21:27,008 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba432f88642f438e80655d15919e4a84, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=636, earliestPutTs=1732785685869 
2024-11-28T09:21:27,015 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#B#compaction#135 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:27,015 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#A#compaction#136 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:27,015 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/e99f1e753b14478aa81b0380d067148c is 50, key is test_row_0/B:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:27,016 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/bdf8bb7909fc467183a433a050cd26e2 is 50, key is test_row_0/A:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:27,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741974_1150 (size=13833) 2024-11-28T09:21:27,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741975_1151 (size=13833) 2024-11-28T09:21:27,025 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/bdf8bb7909fc467183a433a050cd26e2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bdf8bb7909fc467183a433a050cd26e2 2024-11-28T09:21:27,031 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/A of 9324112e51bee406916a385aca28ddff into bdf8bb7909fc467183a433a050cd26e2(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
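The compaction entries show the exploring policy selecting all three flushed files per store ("Selecting compaction from 3 store files ... 16 blocking") and rewriting them into a single ~13.5 K file. A short sketch of the related server-side knobs and of requesting a major compaction through the Admin API instead of waiting for these background minor compactions; the class name and connection setup are illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Minor compaction is considered once a store has this many files;
    // the log shows "Selecting compaction from 3 store files".
    conf.setInt("hbase.hstore.compactionThreshold", 3);
    // Writes to a store are blocked once it accumulates this many files;
    // the log reports "16 blocking".
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the servers to major-compact the table rather than waiting for
      // the background minor compactions seen in the log (the call is asynchronous).
      admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```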
2024-11-28T09:21:27,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:27,031 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/A, priority=13, startTime=1732785687005; duration=0sec 2024-11-28T09:21:27,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:27,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:A 2024-11-28T09:21:27,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:27,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:27,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 9324112e51bee406916a385aca28ddff/C is initiating minor compaction (all files) 2024-11-28T09:21:27,032 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 9324112e51bee406916a385aca28ddff/C in TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:27,032 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c855f330c9914d16b2558fb9c67b7642, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp, totalSize=37.4 K 2024-11-28T09:21:27,033 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c855f330c9914d16b2558fb9c67b7642, keycount=150, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=596, earliestPutTs=1732785684092 2024-11-28T09:21:27,033 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5039dd90b70f4627a0f9ec5cf39f4182, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=605, earliestPutTs=1732785684730 2024-11-28T09:21:27,033 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05a77c77318342619b315711fecc20a3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=636, earliestPutTs=1732785685873 2024-11-28T09:21:27,041 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 9324112e51bee406916a385aca28ddff#C#compaction#137 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:27,041 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/940f62b03799429f8f0cab4027ff014d is 50, key is test_row_0/C:col10/1732785686485/Put/seqid=0 2024-11-28T09:21:27,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741976_1152 (size=13833) 2024-11-28T09:21:27,111 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1324ee83 to 127.0.0.1:53251 2024-11-28T09:21:27,111 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x048068a5 to 127.0.0.1:53251 2024-11-28T09:21:27,111 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,112 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:27,113 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:27,113 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53b8a93e to 127.0.0.1:53251 2024-11-28T09:21:27,113 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,114 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6f343a4d to 127.0.0.1:53251 2024-11-28T09:21:27,114 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:27,114 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:27,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:27,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:27,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:27,115 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:27,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/3ecaa57222e244dca9a6d329160e7b8e is 50, key is test_row_0/A:col10/1732785687111/Put/seqid=0 2024-11-28T09:21:27,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741977_1153 (size=12301) 2024-11-28T09:21:27,147 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=36 2024-11-28T09:21:27,147 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 36 completed 2024-11-28T09:21:27,147 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x478bae6b to 127.0.0.1:53251 2024-11-28T09:21:27,148 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 90 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 106 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 116 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 108 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5355 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5273 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2374 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7122 rows 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2373 2024-11-28T09:21:27,148 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 7119 rows 2024-11-28T09:21:27,148 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:21:27,148 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x63607639 to 127.0.0.1:53251 2024-11-28T09:21:27,148 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:27,151 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T09:21:27,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T09:21:27,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:27,167 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785687167"}]},"ts":"1732785687167"} 2024-11-28T09:21:27,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:27,169 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:21:27,171 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:21:27,173 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:21:27,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, UNASSIGN}] 2024-11-28T09:21:27,178 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, UNASSIGN 2024-11-28T09:21:27,179 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=9324112e51bee406916a385aca28ddff, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:27,180 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:21:27,180 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure 9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:27,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:27,336 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:27,337 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:27,338 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:21:27,338 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing 9324112e51bee406916a385aca28ddff, disabling compactions & flushes 2024-11-28T09:21:27,338 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1942): waiting for 2 compactions & cache flush to complete for region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:27,425 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/e99f1e753b14478aa81b0380d067148c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e99f1e753b14478aa81b0380d067148c 2024-11-28T09:21:27,430 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/B of 9324112e51bee406916a385aca28ddff into e99f1e753b14478aa81b0380d067148c(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:27,430 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:27,430 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/B, priority=13, startTime=1732785687005; duration=0sec 2024-11-28T09:21:27,430 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:27,430 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:B 2024-11-28T09:21:27,450 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/940f62b03799429f8f0cab4027ff014d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/940f62b03799429f8f0cab4027ff014d 2024-11-28T09:21:27,455 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 9324112e51bee406916a385aca28ddff/C of 9324112e51bee406916a385aca28ddff into 940f62b03799429f8f0cab4027ff014d(size=13.5 K), total size for store is 13.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:27,455 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:27,455 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
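The DisableTableProcedure / CloseRegionProcedure chain above is what the master schedules when a client disables the table (the "Started disable of TestAcidGuarantees" entry earlier). A minimal Admin-side sketch of the equivalent calls, assuming a reachable cluster; the flush-before-disable step mirrors the flush procedure that completed just before the disable in this run:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      // Flush remaining memstore data, then disable the table.
      // disableTable blocks until the master's DisableTableProcedure
      // (and the region close it schedules) has finished.
      admin.flush(tn);
      if (!admin.isTableDisabled(tn)) {
        admin.disableTable(tn);
      }
    }
  }
}
```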
2024-11-28T09:21:27,455 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff., storeName=9324112e51bee406916a385aca28ddff/C, priority=13, startTime=1732785687005; duration=0sec 2024-11-28T09:21:27,455 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:27,455 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9324112e51bee406916a385aca28ddff:C 2024-11-28T09:21:27,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:27,528 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/3ecaa57222e244dca9a6d329160e7b8e 2024-11-28T09:21:27,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c77c5cca74524a6f863f6d33403dc785 is 50, key is test_row_0/B:col10/1732785687111/Put/seqid=0 2024-11-28T09:21:27,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741978_1154 (size=12301) 2024-11-28T09:21:27,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:27,939 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c77c5cca74524a6f863f6d33403dc785 2024-11-28T09:21:27,946 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/4400d95e83ee40759cf81fd6e78ed260 is 50, key is test_row_0/C:col10/1732785687111/Put/seqid=0 2024-11-28T09:21:27,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741979_1155 (size=12301) 2024-11-28T09:21:28,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:28,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=649 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/4400d95e83ee40759cf81fd6e78ed260 2024-11-28T09:21:28,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/3ecaa57222e244dca9a6d329160e7b8e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/3ecaa57222e244dca9a6d329160e7b8e 2024-11-28T09:21:28,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/3ecaa57222e244dca9a6d329160e7b8e, entries=150, sequenceid=649, filesize=12.0 K 2024-11-28T09:21:28,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/c77c5cca74524a6f863f6d33403dc785 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c77c5cca74524a6f863f6d33403dc785 2024-11-28T09:21:28,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c77c5cca74524a6f863f6d33403dc785, entries=150, sequenceid=649, filesize=12.0 K 2024-11-28T09:21:28,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/4400d95e83ee40759cf81fd6e78ed260 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/4400d95e83ee40759cf81fd6e78ed260 2024-11-28T09:21:28,371 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/4400d95e83ee40759cf81fd6e78ed260, entries=150, sequenceid=649, filesize=12.0 K 2024-11-28T09:21:28,372 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=6.71 KB/6870 for 9324112e51bee406916a385aca28ddff in 1259ms, sequenceid=649, compaction requested=false 2024-11-28T09:21:28,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:28,372 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:28,372 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:28,372 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 
after waiting 0 ms 2024-11-28T09:21:28,372 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:28,372 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(2837): Flushing 9324112e51bee406916a385aca28ddff 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-28T09:21:28,372 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=A 2024-11-28T09:21:28,373 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:28,373 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=B 2024-11-28T09:21:28,373 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:28,373 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 9324112e51bee406916a385aca28ddff, store=C 2024-11-28T09:21:28,373 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:28,376 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d06c92b7b0af44a3bee5de614352d08d is 50, key is test_row_2/A:col10/1732785687146/Put/seqid=0 2024-11-28T09:21:28,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741980_1156 (size=7415) 2024-11-28T09:21:28,781 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d06c92b7b0af44a3bee5de614352d08d 2024-11-28T09:21:28,789 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20553519ce1e4afbb70eef09c30331c4 is 50, key is test_row_2/B:col10/1732785687146/Put/seqid=0 2024-11-28T09:21:28,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741981_1157 (size=7415) 2024-11-28T09:21:28,984 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might 
because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T09:21:29,194 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20553519ce1e4afbb70eef09c30331c4 2024-11-28T09:21:29,202 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8c3a09d90bf149b4b9fa5ce44c70d269 is 50, key is test_row_2/C:col10/1732785687146/Put/seqid=0 2024-11-28T09:21:29,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741982_1158 (size=7415) 2024-11-28T09:21:29,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:29,607 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=655 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8c3a09d90bf149b4b9fa5ce44c70d269 2024-11-28T09:21:29,612 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/A/d06c92b7b0af44a3bee5de614352d08d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d06c92b7b0af44a3bee5de614352d08d 2024-11-28T09:21:29,617 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d06c92b7b0af44a3bee5de614352d08d, entries=50, sequenceid=655, filesize=7.2 K 2024-11-28T09:21:29,618 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/B/20553519ce1e4afbb70eef09c30331c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20553519ce1e4afbb70eef09c30331c4 2024-11-28T09:21:29,622 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20553519ce1e4afbb70eef09c30331c4, entries=50, sequenceid=655, filesize=7.2 K 
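The flush entries above repeat the same two-step pattern for each store file: DefaultStoreFlusher writes the new file under the region's .tmp/<family> directory, HRegionFileSystem "commits" it by moving it into the family directory (A, B or C), and HStore then reports it as added with its entry count and file size. The sketch below imitates that write-then-move step on a local filesystem purely to illustrate the directory layout seen in the log; the class, the commit method and the use of java.nio.file are assumptions for the illustration, not HBase's implementation (which runs against HDFS).

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpCommitSketch {
    // regionDir/family mirror the layout in the log:
    // <region>/.tmp/<family>/<file>  ->  <region>/<family>/<file>
    static Path commit(Path regionDir, String family, String fileName) throws IOException {
        Path tmpFile = regionDir.resolve(".tmp").resolve(family).resolve(fileName);
        Path storeDir = regionDir.resolve(family);
        Files.createDirectories(storeDir);
        // A single move makes the flushed file visible under the family directory in one step.
        return Files.move(tmpFile, storeDir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path region = Files.createTempDirectory("9324112e51bee406916a385aca28ddff");
        Path tmpA = region.resolve(".tmp").resolve("A");
        Files.createDirectories(tmpA);
        Files.writeString(tmpA.resolve("3ecaa57222e244dca9a6d329160e7b8e"), "hfile bytes");
        System.out.println(commit(region, "A", "3ecaa57222e244dca9a6d329160e7b8e"));
    }
}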
2024-11-28T09:21:29,622 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/.tmp/C/8c3a09d90bf149b4b9fa5ce44c70d269 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c3a09d90bf149b4b9fa5ce44c70d269 2024-11-28T09:21:29,627 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c3a09d90bf149b4b9fa5ce44c70d269, entries=50, sequenceid=655, filesize=7.2 K 2024-11-28T09:21:29,628 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 9324112e51bee406916a385aca28ddff in 1255ms, sequenceid=655, compaction requested=true 2024-11-28T09:21:29,628 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/cab232f0eb2045858f38cdbc52497af7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/28b9dd5e179844fc9fa9d9b8d66aa7f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e7ff64d653684d10a9c600f549fd9e52, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bbb746abcd984554a68e47fca6555c48, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/82b20c17b539492582220cafb69aa353, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad7dade332994af6b86152fdb98af785, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d328d7f0c2584593bc921fe9950dab16, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/93ff1aad62b2484ebd241aafed147a6d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4a014196b8284b5f9565b760fb79f4a2, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/298800e036424fcb885fa32e920fafdd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/87bbb873772045dda23ae48717b62019, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/0ed7a2e236be420d9251d50ecd06b945, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/8889dd9955c14e75b50f51169ca9da4b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84] to archive 2024-11-28T09:21:29,632 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
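The StoreCloser entries that follow move each compacted file into a parallel archive tree rather than deleting it: a source path under data/default/... reappears under archive/data/default/... with the table, region and family segments unchanged. The short sketch below only demonstrates that path rewrite; the rootDir parameter and archiveTarget name are hypothetical and this is not the HFileArchiver code itself.

import java.nio.file.Path;

public class ArchivePathSketch {
    // rootDir is the test-data directory seen in the log; storeFile is any HFile under rootDir/data/...
    static Path archiveTarget(Path rootDir, Path storeFile) {
        Path relative = rootDir.relativize(storeFile);        // data/default/<table>/<region>/<family>/<hfile>
        return rootDir.resolve("archive").resolve(relative);  // archive/data/default/<table>/<region>/<family>/<hfile>
    }

    public static void main(String[] args) {
        Path root = Path.of("/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532");
        Path storeFile = root.resolve(
            "data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818");
        // Prints .../archive/data/default/TestAcidGuarantees/<region>/A/7e1ea91b04fe4819a0bfc90a69f46818
        System.out.println(archiveTarget(root, storeFile));
    }
}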
2024-11-28T09:21:29,637 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7e1ea91b04fe4819a0bfc90a69f46818 2024-11-28T09:21:29,639 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/a9882ac20a764f4ba222c6a9499bf8c7 2024-11-28T09:21:29,640 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/fe05bbe89ff64b239bff72b0430fd8bd 2024-11-28T09:21:29,641 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/cab232f0eb2045858f38cdbc52497af7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/cab232f0eb2045858f38cdbc52497af7 2024-11-28T09:21:29,642 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/9f266bdf4df248049c2b1263fbf5ec5f 2024-11-28T09:21:29,644 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/28b9dd5e179844fc9fa9d9b8d66aa7f4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/28b9dd5e179844fc9fa9d9b8d66aa7f4 2024-11-28T09:21:29,645 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/966aacd9447d40bbbbd1c89b498116a5 2024-11-28T09:21:29,646 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6df0d666ff304643b2f5deea360b3b10 2024-11-28T09:21:29,647 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e7ff64d653684d10a9c600f549fd9e52 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e7ff64d653684d10a9c600f549fd9e52 2024-11-28T09:21:29,648 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2d0dc6b9254445c5b006f9826cfe0c66 2024-11-28T09:21:29,649 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/f74483259a0041ac9413b50906cdb3c1 2024-11-28T09:21:29,651 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d5687b2e4d7843278bc0f46e8aed4fbb 2024-11-28T09:21:29,652 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bbb746abcd984554a68e47fca6555c48 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bbb746abcd984554a68e47fca6555c48 2024-11-28T09:21:29,653 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c5c597cd416b481ebb083f053e6798de 2024-11-28T09:21:29,655 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2165e11f7eba40e3b1b206abf2abab25 2024-11-28T09:21:29,656 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad19c5c674934ab3af12c773abf1670f 2024-11-28T09:21:29,657 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4b49c21658d8467f974b4b395d68604c 2024-11-28T09:21:29,659 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/82b20c17b539492582220cafb69aa353 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/82b20c17b539492582220cafb69aa353 2024-11-28T09:21:29,660 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ffa8316651904c74863181b981a12aa8 2024-11-28T09:21:29,661 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/48837577ee0740b3bb2aba337d3b20ce 2024-11-28T09:21:29,662 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad7dade332994af6b86152fdb98af785 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ad7dade332994af6b86152fdb98af785 2024-11-28T09:21:29,664 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e5c8c911279b47ada50487b0f0e6196d 2024-11-28T09:21:29,665 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5b6c2145bf6a47ec81cb0efb1c001e29 2024-11-28T09:21:29,666 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d328d7f0c2584593bc921fe9950dab16 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d328d7f0c2584593bc921fe9950dab16 2024-11-28T09:21:29,667 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/da83118749c94a439c19261f241693f5 2024-11-28T09:21:29,669 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/93ff1aad62b2484ebd241aafed147a6d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/93ff1aad62b2484ebd241aafed147a6d 2024-11-28T09:21:29,670 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/5e03a7525e834955ac51ac8fb62f4365 2024-11-28T09:21:29,671 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/47c3fe778b884a88a31113570baa635e 2024-11-28T09:21:29,673 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/1085bbdd55284d5e9d8a25e551c9ecca 2024-11-28T09:21:29,674 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4a014196b8284b5f9565b760fb79f4a2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/4a014196b8284b5f9565b760fb79f4a2 2024-11-28T09:21:29,675 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6579b96d8ff3449fb4dc8231d53c77c5 2024-11-28T09:21:29,677 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/e60a994fe16d40e088e7787f838c10cd 2024-11-28T09:21:29,678 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/298800e036424fcb885fa32e920fafdd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/298800e036424fcb885fa32e920fafdd 2024-11-28T09:21:29,679 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/c1482be9eb234f2e9628c788106c0514 2024-11-28T09:21:29,681 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/07cbd6c24c2d43cca9a6ac1e26585fe8 2024-11-28T09:21:29,682 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/b7c4d06c13634c2ea1c6ec143e3f8511 2024-11-28T09:21:29,684 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/87bbb873772045dda23ae48717b62019 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/87bbb873772045dda23ae48717b62019 2024-11-28T09:21:29,685 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/de4b4ba574ca4d099bb65b96b0bbc19d 2024-11-28T09:21:29,686 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/0ed7a2e236be420d9251d50ecd06b945 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/0ed7a2e236be420d9251d50ecd06b945 2024-11-28T09:21:29,688 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/2f6e987e3f8b4b54afafcae04115f8dd 2024-11-28T09:21:29,689 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/7d8941db1ebd40d68b4d6be2deeaec6e 2024-11-28T09:21:29,690 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/6b2d91ed121844be96a4c2c5328e813b 2024-11-28T09:21:29,692 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/8889dd9955c14e75b50f51169ca9da4b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/8889dd9955c14e75b50f51169ca9da4b 2024-11-28T09:21:29,693 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/805595b702b443508104f73ff6b7a192 2024-11-28T09:21:29,694 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/ba432f88642f438e80655d15919e4a84 2024-11-28T09:21:29,712 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/f55d313d1d0245429f8f4c92bb2c3f32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/773286ecd3404e6fb0dff827785c0ec4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c90ead9366fe4b51a995061681991ac1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/df1870d9e7d340f0b599792d86e48cfd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bbbeddef82eb47828c30fd3faa14c373, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/aabee2ff60874d59a521034961d9b711, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ecb98be753854a5085a65df5bd006166, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a851c5d966874d6d85a332bf1252bafd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/17c5677a3f6a406686a4d8b52c177684, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/84e204be4e244a6f9cbd083493f39651, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/0d345da904c74eaf8d8ad0733eeed7f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bca48b4b5b0548d28e2ddd330218a98b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c19b38e83b0748c18cb1158e235c9ad0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db] to archive 2024-11-28T09:21:29,713 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
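Because the "Moving the files [...] to archive" entry lists every store file up front and each file is then archived on its own "Archived from FileableStoreFile" line, the two can be cross-checked when reading a log like this one. A hypothetical helper for that check is sketched below; the file names are a small sample copied from the B-store entries around it, not the full list.

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

public class ArchiveCrossCheck {
    public static void main(String[] args) {
        // Files announced in the "Moving the files [...] to archive" entry (sample only).
        List<String> planned = List.of(
            "B/a967d15758314ec1bfef65c494186807",
            "B/92499738def54736ab192558e2fcfefe",
            "B/f55d313d1d0245429f8f4c92bb2c3f32");
        // Files seen so far in "Archived from FileableStoreFile" entries (sample only).
        Set<String> archived = Set.of(
            "B/a967d15758314ec1bfef65c494186807",
            "B/92499738def54736ab192558e2fcfefe");
        List<String> missing = planned.stream()
            .filter(f -> !archived.contains(f))
            .collect(Collectors.toList());
        System.out.println(missing.isEmpty()
            ? "all planned files archived"
            : "not yet archived: " + missing);
    }
}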
2024-11-28T09:21:29,715 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a967d15758314ec1bfef65c494186807 2024-11-28T09:21:29,717 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/92499738def54736ab192558e2fcfefe 2024-11-28T09:21:29,718 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/f55d313d1d0245429f8f4c92bb2c3f32 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/f55d313d1d0245429f8f4c92bb2c3f32 2024-11-28T09:21:29,720 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ee62ac3905b74c81829271c17a5753c7 2024-11-28T09:21:29,721 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2ff9fc18a1674860b0b09fba684f04db 2024-11-28T09:21:29,722 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/773286ecd3404e6fb0dff827785c0ec4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/773286ecd3404e6fb0dff827785c0ec4 2024-11-28T09:21:29,723 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/6b6252abc20944539f22915924c21355 2024-11-28T09:21:29,725 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/1599c39e42544fb884b33043f275019f 2024-11-28T09:21:29,726 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c90ead9366fe4b51a995061681991ac1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c90ead9366fe4b51a995061681991ac1 2024-11-28T09:21:29,727 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/85c1ffb7bb334915be6cc6ffbf2cc817 2024-11-28T09:21:29,728 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/3e4debb7e31048298801393fab1b8852 2024-11-28T09:21:29,729 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/348ee4e246a04d7698ac79dd6cedb662 2024-11-28T09:21:29,731 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/df1870d9e7d340f0b599792d86e48cfd to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/df1870d9e7d340f0b599792d86e48cfd 2024-11-28T09:21:29,732 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/af4d4e9df8a9400989f34bd39ac7275f 2024-11-28T09:21:29,733 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/7aa09265d9a74f87a0ffa43a0a09a2d0 2024-11-28T09:21:29,735 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8172e3078f934ec19e1b860390020f01 2024-11-28T09:21:29,736 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bbbeddef82eb47828c30fd3faa14c373 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bbbeddef82eb47828c30fd3faa14c373 2024-11-28T09:21:29,737 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/03fd2e0092c042469fb4c71bdf97dd36 2024-11-28T09:21:29,739 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20b375872730489599645fe902432acd 2024-11-28T09:21:29,740 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/aabee2ff60874d59a521034961d9b711 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/aabee2ff60874d59a521034961d9b711 2024-11-28T09:21:29,741 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/40e1776815e44a81b228b117e667e7c1 2024-11-28T09:21:29,742 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b5e6fb1317bd45ea98f97c5ad4903916 2024-11-28T09:21:29,744 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ecb98be753854a5085a65df5bd006166 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ecb98be753854a5085a65df5bd006166 2024-11-28T09:21:29,745 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/73acd2c7c6da4ccc99151657ead94482 2024-11-28T09:21:29,747 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/b42dd754c2294a6ca14f3daba3943932 2024-11-28T09:21:29,748 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a851c5d966874d6d85a332bf1252bafd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a851c5d966874d6d85a332bf1252bafd 2024-11-28T09:21:29,749 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c274091a558a4b30a23858f0bab1f103 2024-11-28T09:21:29,751 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bf530c833d254ff0a594176b5ad86a62 2024-11-28T09:21:29,752 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/17c5677a3f6a406686a4d8b52c177684 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/17c5677a3f6a406686a4d8b52c177684 2024-11-28T09:21:29,753 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/a9a00bd3ce94400699fa5df86dbed03d 2024-11-28T09:21:29,755 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/8e8805aafd78428b87670fa9bccf2b17 2024-11-28T09:21:29,756 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d6e6699dfe054747b7921a17ab21874b 2024-11-28T09:21:29,757 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/84e204be4e244a6f9cbd083493f39651 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/84e204be4e244a6f9cbd083493f39651 2024-11-28T09:21:29,758 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2b9885531e9c47f8830f8c5901a30716 2024-11-28T09:21:29,759 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e8999256f9f74ca991de0903b4f95e06 2024-11-28T09:21:29,760 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/0d345da904c74eaf8d8ad0733eeed7f4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/0d345da904c74eaf8d8ad0733eeed7f4 2024-11-28T09:21:29,762 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/2e50387421eb4d25b53a14acd7a00990 2024-11-28T09:21:29,763 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/cba26a53c5b64817a6cdd017cde34e4b 2024-11-28T09:21:29,764 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bca48b4b5b0548d28e2ddd330218a98b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/bca48b4b5b0548d28e2ddd330218a98b 2024-11-28T09:21:29,765 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/d1ac739f65784be38387bb5fc2898bb4 2024-11-28T09:21:29,766 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/41f02988a58d43bdb6eeca37b302114a 2024-11-28T09:21:29,768 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c19b38e83b0748c18cb1158e235c9ad0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c19b38e83b0748c18cb1158e235c9ad0 2024-11-28T09:21:29,769 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/467d7d71be85480fb6cbb77406b8c3e1 2024-11-28T09:21:29,770 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c3ce3fdba0c84f8781ffea3e20e4f2d0 2024-11-28T09:21:29,771 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/ed6a5a490dd74e379d48e95c0342d4db 2024-11-28T09:21:29,774 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1cc22f2a610141409c960909cfb55c7c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e129cc0946c340d39b7fee4fc2cf5363, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c5c8b6ccaef48c4b48beed3863950b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2b3cb36e1eca460da89bd978b565c6ba, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/80c9368eeef741b7a07e81d92b46ead2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dfcd54593fa14e899c96acca27e6ebb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2d5538daf2964794a6ca46f8c2fc9917, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6d11d81a377f4183a1e5ea5cb457ea2b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/58cde7fe1b6e49b99f02fc875426d0e7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/54329f7cec6f48fb9f5aea20bd77f431, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1d8805a4ed2f493ebc8709a5aee320e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/175c62654f9f4e99ab1927a25381a5c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c855f330c9914d16b2558fb9c67b7642, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3] to archive 2024-11-28T09:21:29,775 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
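[Editor's note, not part of the original log] The list above enumerates the compacted files of family C that the store closer hands to the archiver. A hypothetical way to confirm afterwards that they landed under archive/ is a simple directory listing with the Hadoop FileSystem API; the paths come from this log, the class name is illustrative, and this check is not part of the test itself.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Hypothetical check: list the archived family-C files of region 9324112e51bee406916a385aca28ddff.
public class ListArchivedFamilyC {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33549"), new Configuration());
    Path familyDir = new Path("/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532"
        + "/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C");
    for (FileStatus st : fs.listStatus(familyDir)) {
      System.out.println(st.getPath().getName() + "  " + st.getLen() + " bytes");
    }
  }
}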
2024-11-28T09:21:29,777 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/937cea3554b44cf1a054e114e354c292 2024-11-28T09:21:29,778 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5fd300af94c34569b4cfe84a5f73bc47 2024-11-28T09:21:29,779 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1cc22f2a610141409c960909cfb55c7c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1cc22f2a610141409c960909cfb55c7c 2024-11-28T09:21:29,780 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/7563fe8c408f442a94d8bd33622ec3ba 2024-11-28T09:21:29,781 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/d64b8d699d1f4b3e8dc61289750f0a04 2024-11-28T09:21:29,782 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e129cc0946c340d39b7fee4fc2cf5363 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e129cc0946c340d39b7fee4fc2cf5363 2024-11-28T09:21:29,783 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/eea68bd99e214065959bfec3755c299d 2024-11-28T09:21:29,784 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/0c3f831c93e644bf86ced4b2f7eb7193 2024-11-28T09:21:29,786 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c5c8b6ccaef48c4b48beed3863950b0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c5c8b6ccaef48c4b48beed3863950b0 2024-11-28T09:21:29,787 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5e8f8abf08b49fc8218007576064972 2024-11-28T09:21:29,788 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/472fe96c02f24e7d89be5c2012fb4fa8 2024-11-28T09:21:29,789 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/29762ee6ee7445c9b707d935759db3ec 2024-11-28T09:21:29,790 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2b3cb36e1eca460da89bd978b565c6ba to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2b3cb36e1eca460da89bd978b565c6ba 2024-11-28T09:21:29,791 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/a092317c2fee477da3c8a31d47d2fc2c 2024-11-28T09:21:29,793 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1b10d23923e44072a695f9ea4cba8bb0 2024-11-28T09:21:29,794 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/594decbcdfc149caab544833146f1749 2024-11-28T09:21:29,795 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/80c9368eeef741b7a07e81d92b46ead2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/80c9368eeef741b7a07e81d92b46ead2 2024-11-28T09:21:29,796 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5ece68736174432482fe3a690869c169 2024-11-28T09:21:29,797 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c5472e8b324340b396ba63225ee3ddcb 2024-11-28T09:21:29,799 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dfcd54593fa14e899c96acca27e6ebb0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dfcd54593fa14e899c96acca27e6ebb0 2024-11-28T09:21:29,800 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/92a7df0f49004adba40363d002a3572c 2024-11-28T09:21:29,801 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ec43509c63444c739fa804ffd4eccbc2 2024-11-28T09:21:29,803 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2d5538daf2964794a6ca46f8c2fc9917 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/2d5538daf2964794a6ca46f8c2fc9917 2024-11-28T09:21:29,804 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8b2935459e1b47d6b912e9736dd81aaf 2024-11-28T09:21:29,805 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/034816f3b2164364bf6ff0ef36bb7d51 2024-11-28T09:21:29,807 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6d11d81a377f4183a1e5ea5cb457ea2b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6d11d81a377f4183a1e5ea5cb457ea2b 2024-11-28T09:21:29,808 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e5c716f6ec9f4dc18f6c4a1b52bcd33f 2024-11-28T09:21:29,809 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/edb235d5ad3c4e91a4df77416adb6ed4 2024-11-28T09:21:29,810 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/bde5741138d947fcaa18b8875ecdb400 2024-11-28T09:21:29,811 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/58cde7fe1b6e49b99f02fc875426d0e7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/58cde7fe1b6e49b99f02fc875426d0e7 2024-11-28T09:21:29,813 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/dd1b5b9788734180a010609ced4a5730 2024-11-28T09:21:29,814 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ae702beeaf71432598c8e39a2d37f6b2 2024-11-28T09:21:29,815 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/54329f7cec6f48fb9f5aea20bd77f431 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/54329f7cec6f48fb9f5aea20bd77f431 2024-11-28T09:21:29,816 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/32cfa76c57b841f9a5e08d9d29c61afd 2024-11-28T09:21:29,817 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/ba75a0c09a594f1f92ca29c944c37832 2024-11-28T09:21:29,818 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1d8805a4ed2f493ebc8709a5aee320e4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1d8805a4ed2f493ebc8709a5aee320e4 2024-11-28T09:21:29,819 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/23dd942bd20643688eb7dea534288d7c 2024-11-28T09:21:29,821 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/6ea2ffa7488747618faedbd0335356d1 2024-11-28T09:21:29,822 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/175c62654f9f4e99ab1927a25381a5c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/175c62654f9f4e99ab1927a25381a5c4 2024-11-28T09:21:29,824 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/e4fbaa3ca2f4471db8dc48450cda3c08 2024-11-28T09:21:29,825 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/1fd7e8af29ec46219be65a9f70a134b8 2024-11-28T09:21:29,827 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c855f330c9914d16b2558fb9c67b7642 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/c855f330c9914d16b2558fb9c67b7642 2024-11-28T09:21:29,828 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/b08f0729c2c24571afc5d7b423f94cad 2024-11-28T09:21:29,829 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/5039dd90b70f4627a0f9ec5cf39f4182 2024-11-28T09:21:29,831 DEBUG [StoreCloser-TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/05a77c77318342619b315711fecc20a3 2024-11-28T09:21:29,837 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/recovered.edits/658.seqid, newMaxSeqId=658, maxSeqId=1 2024-11-28T09:21:29,843 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff. 2024-11-28T09:21:29,843 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for 9324112e51bee406916a385aca28ddff: 2024-11-28T09:21:29,845 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed 9324112e51bee406916a385aca28ddff 2024-11-28T09:21:29,846 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=9324112e51bee406916a385aca28ddff, regionState=CLOSED 2024-11-28T09:21:29,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-28T09:21:29,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure 9324112e51bee406916a385aca28ddff, server=363d8d38a970,33819,1732785660637 in 2.6670 sec 2024-11-28T09:21:29,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-28T09:21:29,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=9324112e51bee406916a385aca28ddff, UNASSIGN in 2.6720 sec 2024-11-28T09:21:29,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-28T09:21:29,853 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.6790 sec 2024-11-28T09:21:29,855 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785689855"}]},"ts":"1732785689855"} 2024-11-28T09:21:29,856 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:21:29,859 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:21:29,861 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.6990 sec 2024-11-28T09:21:30,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T09:21:31,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 
{}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-28T09:21:31,273 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 38 completed 2024-11-28T09:21:31,276 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:21:31,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=42, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,282 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=42, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,283 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=42, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-28T09:21:31,286 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff 2024-11-28T09:21:31,290 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/recovered.edits] 2024-11-28T09:21:31,292 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/3ecaa57222e244dca9a6d329160e7b8e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/3ecaa57222e244dca9a6d329160e7b8e 2024-11-28T09:21:31,294 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bdf8bb7909fc467183a433a050cd26e2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/bdf8bb7909fc467183a433a050cd26e2 2024-11-28T09:21:31,295 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d06c92b7b0af44a3bee5de614352d08d to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/A/d06c92b7b0af44a3bee5de614352d08d 2024-11-28T09:21:31,297 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20553519ce1e4afbb70eef09c30331c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/20553519ce1e4afbb70eef09c30331c4 2024-11-28T09:21:31,298 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c77c5cca74524a6f863f6d33403dc785 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/c77c5cca74524a6f863f6d33403dc785 2024-11-28T09:21:31,299 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e99f1e753b14478aa81b0380d067148c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/B/e99f1e753b14478aa81b0380d067148c 2024-11-28T09:21:31,302 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/4400d95e83ee40759cf81fd6e78ed260 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/4400d95e83ee40759cf81fd6e78ed260 2024-11-28T09:21:31,303 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c3a09d90bf149b4b9fa5ce44c70d269 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/8c3a09d90bf149b4b9fa5ce44c70d269 2024-11-28T09:21:31,304 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/940f62b03799429f8f0cab4027ff014d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/C/940f62b03799429f8f0cab4027ff014d 2024-11-28T09:21:31,306 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/recovered.edits/658.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff/recovered.edits/658.seqid 
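The HFileArchiver entries above all apply the same mapping: a store file under the table's data directory is moved to the mirrored location under archive/, preserving the namespace/table/region/family layout. A minimal sketch of that mapping, assuming paths shaped like the ones in this log (the helper name and root-dir handling are illustrative, not HBase API):

// Sketch only: reproduce the data/ -> archive/data/ path rewrite visible in the log.
static String toArchivePath(String storeFilePath, String hbaseRootDir) {
    // e.g. <root>/data/default/TestAcidGuarantees/<region>/A/<hfile>
    //  ->  <root>/archive/data/default/TestAcidGuarantees/<region>/A/<hfile>
    String relative = storeFilePath.substring(hbaseRootDir.length());
    return hbaseRootDir + "/archive" + relative;
}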
2024-11-28T09:21:31,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/9324112e51bee406916a385aca28ddff 2024-11-28T09:21:31,307 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:21:31,312 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=42, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,316 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-28T09:21:31,320 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:21:31,352 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T09:21:31,353 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=42, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,353 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:21:31,354 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785691353"}]},"ts":"9223372036854775807"} 2024-11-28T09:21:31,357 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:21:31,357 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 9324112e51bee406916a385aca28ddff, NAME => 'TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:21:31,358 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
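The DisableTableProcedure (pid=38) that completed above and the DeleteTableProcedure (pid=42) traced here are driven from the test client through the standard Admin API; only the master and regionserver side appears in this log. A minimal client-side sketch, assuming a running cluster and a default Configuration (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            // Submits a DisableTableProcedure and waits for it to finish
            // ("Operation: DISABLE ... completed" above).
            admin.disableTable(table);
            // Submits a DeleteTableProcedure: store files are archived by HFileArchiver,
            // then the region rows and the table descriptor are removed from hbase:meta.
            admin.deleteTable(table);
        }
    }
}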
2024-11-28T09:21:31,358 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785691358"}]},"ts":"9223372036854775807"} 2024-11-28T09:21:31,361 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:21:31,365 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=42, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,366 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 88 msec 2024-11-28T09:21:31,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=42 2024-11-28T09:21:31,384 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 42 completed 2024-11-28T09:21:31,397 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=240 (was 219) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;363d8d38a970:33819-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-54249782_22 at /127.0.0.1:55028 [Waiting for operation #353] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1675825963_22 at /127.0.0.1:57784 [Waiting for operation #212] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1675825963_22 at /127.0.0.1:57426 [Waiting for operation #333] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/363d8d38a970:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=458 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=382 (was 300) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4901 (was 5445) 2024-11-28T09:21:31,406 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=240, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=382, ProcessCount=11, AvailableMemoryMB=4901 2024-11-28T09:21:31,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
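The MEMSTORE_FLUSHSIZE warning above is raised while validating the descriptor for the CREATE request that follows: the table sets a 131072-byte (128 KB) per-table flush size, far below the usual 128 MB default, so flushes will be very frequent. A hedged sketch of an equivalent client-side create call, using the column families and attributes shown in the CREATE entry below (the class name and loop are illustrative; other descriptor values are left at their defaults):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptorBuilder table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    // Table-level attribute seen in the CREATE entry below.
                    .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                    // 131072 bytes -> the TableDescriptorChecker "too small" warning above.
                    .setMemStoreFlushSize(128 * 1024);
            for (String family : new String[] {"A", "B", "C"}) {
                table.setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes(family))
                        .setMaxVersions(1)   // VERSIONS => '1'
                        .build());
            }
            // Submits the CreateTableProcedure (pid=43) and waits for it to finish.
            admin.createTable(table.build());
        }
    }
}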
2024-11-28T09:21:31,408 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:21:31,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:31,409 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:21:31,410 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:31,410 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 43 2024-11-28T09:21:31,410 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:21:31,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:31,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741983_1159 (size=960) 2024-11-28T09:21:31,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:31,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:31,818 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:31,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741984_1160 (size=53) 2024-11-28T09:21:32,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 7dfd6fe000bc8ebd2fc5a572e1596e7a, disabling compactions & flushes 2024-11-28T09:21:32,225 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. after waiting 0 ms 2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:32,225 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:32,225 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:32,226 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:21:32,227 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785692226"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785692226"}]},"ts":"1732785692226"} 2024-11-28T09:21:32,228 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T09:21:32,229 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:21:32,229 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785692229"}]},"ts":"1732785692229"} 2024-11-28T09:21:32,230 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:21:32,235 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, ASSIGN}] 2024-11-28T09:21:32,236 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, ASSIGN 2024-11-28T09:21:32,237 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=44, ppid=43, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:21:32,388 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:32,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=44, state=RUNNABLE; OpenRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:32,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:32,542 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:32,545 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:32,545 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:32,546 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,546 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:32,546 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,546 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,547 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,549 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:32,549 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName A 2024-11-28T09:21:32,549 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:32,550 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:32,550 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,551 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:32,551 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName B 2024-11-28T09:21:32,552 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:32,552 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:32,552 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,553 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:32,553 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName C 2024-11-28T09:21:32,553 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:32,554 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:32,554 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:32,555 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,555 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,556 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:21:32,557 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:32,559 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:21:32,560 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 7dfd6fe000bc8ebd2fc5a572e1596e7a; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60579326, jitterRate=-0.09729769825935364}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:21:32,560 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:32,561 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., pid=45, masterSystemTime=1732785692542 2024-11-28T09:21:32,563 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:32,563 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
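The open sequence above backs each of the three column families with a CompactingMemStore (BASIC compactor, 2.00 MB in-memory flush threshold) and, because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, FlushLargeStoresPolicy falls back to memStoreFlushHeapSize divided by the number of families (16.0 M). A minimal sketch, assuming the HBase 2.x Java admin API, of how those two table-level settings could be pinned explicitly; the 32 MB lower bound is an illustrative value, not one taken from this test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushPolicyTuning {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees"); // illustrative target
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(table);
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              // per-column-family flush lower bound; 32 MB is an assumed example value
              .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                        String.valueOf(32L * 1024 * 1024))
              // same table attribute that appears in the descriptor logged below
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
              .build();
          admin.modifyTable(updated); // triggers a region reopen, as in the procedures below
        }
      }
    }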
2024-11-28T09:21:32,563 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=44 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:32,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=44 2024-11-28T09:21:32,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=44, state=SUCCESS; OpenRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 in 174 msec 2024-11-28T09:21:32,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-28T09:21:32,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, ASSIGN in 331 msec 2024-11-28T09:21:32,568 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:21:32,568 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785692568"}]},"ts":"1732785692568"} 2024-11-28T09:21:32,569 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:21:32,572 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=43, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:21:32,573 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1640 sec 2024-11-28T09:21:33,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-28T09:21:33,516 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-28T09:21:33,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4e560c7b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ddf4c3 2024-11-28T09:21:33,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ff872d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:33,523 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:33,525 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57144, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:33,527 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:21:33,528 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38672, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:21:33,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T09:21:33,535 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:21:33,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:33,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741985_1161 (size=996) 2024-11-28T09:21:33,954 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T09:21:33,954 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T09:21:33,958 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:21:33,967 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, REOPEN/MOVE}] 2024-11-28T09:21:33,967 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, REOPEN/MOVE 2024-11-28T09:21:33,968 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:33,969 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:21:33,969 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=49, ppid=48, state=RUNNABLE; CloseRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:34,121 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,122 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(124): Close 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,122 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:21:34,122 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1681): Closing 7dfd6fe000bc8ebd2fc5a572e1596e7a, disabling compactions & flushes 2024-11-28T09:21:34,122 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,122 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,122 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. after waiting 0 ms 2024-11-28T09:21:34,122 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
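The ModifyTableProcedure chain above (pid=46 → ReopenTableRegionsProcedure 47 → TransitRegionStateProcedure 48 REOPEN/MOVE → CloseRegionProcedure 49) is driven by the alter logged at 09:21:33,535, which turns column family A into a MOB family with MOB_THRESHOLD => '4'. A hedged client-side sketch of that alteration, assuming the HBase 2.x Admin/TableDescriptorBuilder API (connection details and error handling omitted):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor current = admin.getDescriptor(table);
          ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
          ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
              .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
              .setMobThreshold(4L)   // MOB_THRESHOLD => '4': larger cells go to MOB files
              .build();
          TableDescriptor updated = TableDescriptorBuilder.newBuilder(current)
              .modifyColumnFamily(mobA)
              .build();
          admin.modifyTable(updated); // master runs ModifyTableProcedure + region reopen
        }
      }
    }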
2024-11-28T09:21:34,126 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T09:21:34,127 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,127 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegion(1635): Region close journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:34,127 WARN [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] regionserver.HRegionServer(3786): Not adding moved region record: 7dfd6fe000bc8ebd2fc5a572e1596e7a to self. 2024-11-28T09:21:34,129 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=49}] handler.UnassignRegionHandler(170): Closed 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=CLOSED 2024-11-28T09:21:34,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=49, resume processing ppid=48 2024-11-28T09:21:34,132 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, ppid=48, state=SUCCESS; CloseRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 in 162 msec 2024-11-28T09:21:34,132 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=48, ppid=47, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, REOPEN/MOVE; state=CLOSED, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=true 2024-11-28T09:21:34,283 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,284 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=48, state=RUNNABLE; OpenRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:34,312 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T09:21:34,313 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38686, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T09:21:34,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,439 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:34,439 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7285): Opening region: {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:21:34,440 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,440 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:21:34,440 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7327): checking encryption for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,440 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(7330): checking classloading for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,444 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,445 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:34,450 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName A 2024-11-28T09:21:34,452 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:34,453 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:34,453 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,454 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:34,454 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName B 2024-11-28T09:21:34,454 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:34,454 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:34,455 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,455 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:21:34,455 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7dfd6fe000bc8ebd2fc5a572e1596e7a columnFamilyName C 2024-11-28T09:21:34,455 DEBUG [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:34,456 INFO [StoreOpener-7dfd6fe000bc8ebd2fc5a572e1596e7a-1 {}] regionserver.HStore(327): Store=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:21:34,456 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,456 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,457 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,459 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:21:34,460 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1085): writing seq id for 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,461 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1102): Opened 7dfd6fe000bc8ebd2fc5a572e1596e7a; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59417484, jitterRate=-0.11461049318313599}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:21:34,462 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegion(1001): Region open journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:34,463 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., pid=50, masterSystemTime=1732785694436 2024-11-28T09:21:34,465 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,465 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=50}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
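The reopened region again reports ConstantSizeRegionSplitPolicy with a jittered desiredMaxFileSize. For reference, a small sketch, assuming the same 2.x descriptor API, of how a table can pin that split policy and the base file size the jitter is applied to; the 64 MB figure is an assumed example, not the value used by this test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SplitPolicySketch {
      // Pins ConstantSizeRegionSplitPolicy and a 64 MB base region size; the policy
      // applies a random jitter around this value, as seen in desiredMaxFileSize above.
      static TableDescriptor withConstantSizeSplits() {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
            .setRegionSplitPolicyClassName(
                "org.apache.hadoop.hbase.regionserver.ConstantSizeRegionSplitPolicy")
            .setMaxFileSize(64L * 1024 * 1024)
            .build();
      }

      public static void main(String[] args) {
        System.out.println(withConstantSizeSplits());
      }
    }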
2024-11-28T09:21:34,465 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=48 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=OPEN, openSeqNum=5, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=48 2024-11-28T09:21:34,467 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=48, state=SUCCESS; OpenRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 in 182 msec 2024-11-28T09:21:34,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-28T09:21:34,469 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, REOPEN/MOVE in 500 msec 2024-11-28T09:21:34,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-11-28T09:21:34,472 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 513 msec 2024-11-28T09:21:34,475 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 936 msec 2024-11-28T09:21:34,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-11-28T09:21:34,484 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1c826820 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29458edd 2024-11-28T09:21:34,489 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cae6c5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,491 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2931c73e to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c7d6279 2024-11-28T09:21:34,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c820ef9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x176c5c1b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328f994d 2024-11-28T09:21:34,499 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e3a4420, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,501 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x190853fc to 
127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9306be 2024-11-28T09:21:34,505 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42e904d8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,506 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x46114993 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@769942d9 2024-11-28T09:21:34,511 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,512 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x367f47f7 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2885d2d9 2024-11-28T09:21:34,516 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb464a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,517 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-28T09:21:34,521 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,522 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-28T09:21:34,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-28T09:21:34,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:21:34,537 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:34,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-28T09:21:34,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:34,538 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:34,539 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:34,539 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:34,540 DEBUG [hconnection-0x27062950-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,544 DEBUG [hconnection-0xd8d984f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,546 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,548 DEBUG [hconnection-0x215a6d10-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,549 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57154, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,550 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,552 DEBUG [hconnection-0x6ddea86a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,553 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57172, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,556 DEBUG [hconnection-0x6c91a889-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,558 DEBUG [hconnection-0x36374063-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,558 DEBUG [hconnection-0x54365b78-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,559 DEBUG [hconnection-0x55c3144e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 
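At 09:21:34,537 the test asks the master to flush TestAcidGuarantees; this is stored as FlushTableProcedure pid=51 and fanned out as a FlushRegionProcedure subprocedure while the writer connections above keep arriving. The corresponding client call is a one-liner against the Admin API; a minimal sketch:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table, mirroring the
          // "flush TestAcidGuarantees" request logged above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }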
2024-11-28T09:21:34,559 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,561 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57190, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,561 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,561 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:34,571 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:34,578 DEBUG [hconnection-0x615bfe4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:21:34,579 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57202, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:21:34,623 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785754616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785754620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785754620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,633 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112898f4fcf0abac41238fb57537c923491c_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785694557/Put/seqid=0 2024-11-28T09:21:34,634 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785754622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785754624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:34,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741986_1162 (size=12154) 2024-11-28T09:21:34,650 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:34,655 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112898f4fcf0abac41238fb57537c923491c_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898f4fcf0abac41238fb57537c923491c_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:34,656 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/09a3a325e8d54edb96910c7d02943c8e, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:34,666 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/09a3a325e8d54edb96910c7d02943c8e is 175, key is test_row_0/A:col10/1732785694557/Put/seqid=0 2024-11-28T09:21:34,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741987_1163 (size=30955) 2024-11-28T09:21:34,681 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/09a3a325e8d54edb96910c7d02943c8e 2024-11-28T09:21:34,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,690 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,691 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
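While that flush is in flight, the region rejects further Puts with RegionTooBusyException at the 512.0 K blocking limit (consistent with the 128 KB memstore flush size warned about at 09:21:33,535 multiplied by the default block multiplier of 4), and the remote FlushRegionCallable for pid=52 reports "NOT flushing ... as already flushing". The stock client retries these internally, but a standalone writer can also back off explicitly; a hedged sketch, with the row, retry budget, and backoff schedule chosen purely for illustration:

    import java.io.IOException;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break; // accepted once the memstore drains below the blocking limit
            } catch (IOException e) {
              // Depending on client retry settings the server's RegionTooBusyException may
              // arrive directly or wrapped; treat either case as "back off and try again".
              boolean tooBusy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!tooBusy || attempt >= 5) {
                throw e; // assumed retry budget of 5 attempts for this sketch
              }
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 2_000); // capped exponential backoff
            }
          }
        }
      }
    }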
2024-11-28T09:21:34,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,734 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/20bcc31e1d7f4ed08a25a121279e88e8 is 50, key is test_row_0/B:col10/1732785694557/Put/seqid=0 2024-11-28T09:21:34,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785754727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,736 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785754734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785754735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785754737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,739 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785754737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741988_1164 (size=12001) 2024-11-28T09:21:34,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/20bcc31e1d7f4ed08a25a121279e88e8 2024-11-28T09:21:34,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/d53b2e24c17c4535829e64a09d9e0098 is 50, key is test_row_0/C:col10/1732785694557/Put/seqid=0 2024-11-28T09:21:34,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741989_1165 (size=12001) 2024-11-28T09:21:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:34,844 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,845 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:34,845 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:34,845 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,940 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785754938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,941 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785754939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785754940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785754941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,951 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:34,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785754946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,998 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:34,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:34,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:34,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:35,152 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:35,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,153 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/d53b2e24c17c4535829e64a09d9e0098 2024-11-28T09:21:35,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/09a3a325e8d54edb96910c7d02943c8e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e 2024-11-28T09:21:35,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e, entries=150, sequenceid=17, filesize=30.2 K 2024-11-28T09:21:35,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/20bcc31e1d7f4ed08a25a121279e88e8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8 2024-11-28T09:21:35,244 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T09:21:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785755243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,245 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785755244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/d53b2e24c17c4535829e64a09d9e0098 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098 2024-11-28T09:21:35,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785755246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,252 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098, entries=150, sequenceid=17, filesize=11.7 K 2024-11-28T09:21:35,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 684ms, sequenceid=17, compaction requested=false 2024-11-28T09:21:35,254 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:35,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:35,258 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:35,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,293 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ec41996a54bf4cb0bc286a7c34d72fbb_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785695256/Put/seqid=0 2024-11-28T09:21:35,293 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785755288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785755294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,306 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,308 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:35,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741990_1166 (size=17034) 2024-11-28T09:21:35,326 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,331 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ec41996a54bf4cb0bc286a7c34d72fbb_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ec41996a54bf4cb0bc286a7c34d72fbb_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:35,333 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b488f9c3dccc445a82918f28ef505d7f, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:35,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b488f9c3dccc445a82918f28ef505d7f is 175, key is test_row_0/A:col10/1732785695256/Put/seqid=0 2024-11-28T09:21:35,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741991_1167 (size=48139) 2024-11-28T09:21:35,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=42, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b488f9c3dccc445a82918f28ef505d7f 2024-11-28T09:21:35,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ef5107a2c5ac4b3da197110247c08dcb is 50, key is test_row_0/B:col10/1732785695256/Put/seqid=0 2024-11-28T09:21:35,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741992_1168 (size=12001) 2024-11-28T09:21:35,386 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ef5107a2c5ac4b3da197110247c08dcb 2024-11-28T09:21:35,397 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785755396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785755405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,412 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T09:21:35,415 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/4c5ee1e1845d4d55bc6796a1582e1c41 is 50, key is test_row_0/C:col10/1732785695256/Put/seqid=0 2024-11-28T09:21:35,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741993_1169 (size=12001) 2024-11-28T09:21:35,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/4c5ee1e1845d4d55bc6796a1582e1c41 2024-11-28T09:21:35,460 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:35,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,465 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b488f9c3dccc445a82918f28ef505d7f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f 2024-11-28T09:21:35,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f, entries=250, sequenceid=42, filesize=47.0 K 2024-11-28T09:21:35,477 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ef5107a2c5ac4b3da197110247c08dcb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb 2024-11-28T09:21:35,483 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb, entries=150, sequenceid=42, filesize=11.7 K 2024-11-28T09:21:35,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/4c5ee1e1845d4d55bc6796a1582e1c41 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41 2024-11-28T09:21:35,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41, entries=150, sequenceid=42, filesize=11.7 K 2024-11-28T09:21:35,492 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 233ms, sequenceid=42, compaction requested=false 2024-11-28T09:21:35,492 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:35,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:21:35,609 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:35,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:35,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:35,623 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
as already flushing 2024-11-28T09:21:35,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284e262d005a094142815fc2940ddf2969_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:35,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:35,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741994_1170 (size=12154) 2024-11-28T09:21:35,658 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:35,664 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284e262d005a094142815fc2940ddf2969_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284e262d005a094142815fc2940ddf2969_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:35,665 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/dc810fc4725441ebbd28481cf0164077, store: [table=TestAcidGuarantees 
family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:35,665 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/dc810fc4725441ebbd28481cf0164077 is 175, key is test_row_0/A:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741995_1171 (size=30955) 2024-11-28T09:21:35,776 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,776 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:35,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,808 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785755779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785755779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,811 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785755781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,822 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785755808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785755808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785755910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785755910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785755913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785755924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:35,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785755925, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:35,931 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:35,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:35,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:35,931 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:35,931 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:35,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,084 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:36,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:36,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,086 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,086 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785756113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,115 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785756113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,125 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785756124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,128 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/dc810fc4725441ebbd28481cf0164077 2024-11-28T09:21:36,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785756130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,133 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785756132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/cb8505e8c36b474484c7ca9dccc88256 is 50, key is test_row_0/B:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:36,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741996_1172 (size=12001) 2024-11-28T09:21:36,163 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/cb8505e8c36b474484c7ca9dccc88256 2024-11-28T09:21:36,188 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/77c1ac16b9ce433da956cd35c7ad7fe8 is 50, key is test_row_0/C:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:36,213 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741997_1173 (size=12001) 2024-11-28T09:21:36,238 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,239 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:36,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:36,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:36,239 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,392 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,393 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785756417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785756418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,431 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785756430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785756433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785756437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,546 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:36,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:36,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:36,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/77c1ac16b9ce433da956cd35c7ad7fe8 2024-11-28T09:21:36,626 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/dc810fc4725441ebbd28481cf0164077 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077 2024-11-28T09:21:36,633 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077, entries=150, sequenceid=54, filesize=30.2 K 2024-11-28T09:21:36,634 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/cb8505e8c36b474484c7ca9dccc88256 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256 2024-11-28T09:21:36,639 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256, entries=150, sequenceid=54, 
filesize=11.7 K 2024-11-28T09:21:36,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/77c1ac16b9ce433da956cd35c7ad7fe8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8 2024-11-28T09:21:36,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:36,650 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8, entries=150, sequenceid=54, filesize=11.7 K 2024-11-28T09:21:36,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1041ms, sequenceid=54, compaction requested=true 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:36,651 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:36,651 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:36,651 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:36,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:36,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:36,653 INFO 
[RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,653 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=35.2 K 2024-11-28T09:21:36,653 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110049 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:36,653 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:36,653 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,653 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=107.5 K 2024-11-28T09:21:36,653 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,653 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077] 2024-11-28T09:21:36,654 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 20bcc31e1d7f4ed08a25a121279e88e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732785694557 2024-11-28T09:21:36,654 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09a3a325e8d54edb96910c7d02943c8e, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732785694557 2024-11-28T09:21:36,655 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ef5107a2c5ac4b3da197110247c08dcb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732785694619 2024-11-28T09:21:36,655 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b488f9c3dccc445a82918f28ef505d7f, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732785694609 2024-11-28T09:21:36,655 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cb8505e8c36b474484c7ca9dccc88256, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:36,655 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc810fc4725441ebbd28481cf0164077, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:36,683 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#153 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:36,683 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c0c94a3ccb284f78a8a23b609b64d3f2 is 50, key is test_row_0/B:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:36,697 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:36,701 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-28T09:21:36,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:36,702 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:36,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:36,713 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128ffddbde0c9ee44ce8224b4cc548c6a9b_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:36,717 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128ffddbde0c9ee44ce8224b4cc548c6a9b_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A 
region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:36,717 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ffddbde0c9ee44ce8224b4cc548c6a9b_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:36,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741998_1174 (size=12104) 2024-11-28T09:21:36,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128df090fbcb22740d280acfc7643ed88d5_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785695800/Put/seqid=0 2024-11-28T09:21:36,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741999_1175 (size=4469) 2024-11-28T09:21:36,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742000_1176 (size=12154) 2024-11-28T09:21:36,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:36,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:36,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785756941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785756942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,948 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785756943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785756944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:36,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:36,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785756945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785757046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,049 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785757047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,053 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,053 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785757051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785757051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,167 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c0c94a3ccb284f78a8a23b609b64d3f2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c0c94a3ccb284f78a8a23b609b64d3f2 2024-11-28T09:21:37,173 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#154 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:37,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/71d1ec676a3d4b76aa414abb1e3877a2 is 175, key is test_row_0/A:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:37,179 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into c0c94a3ccb284f78a8a23b609b64d3f2(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:37,179 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:37,179 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785696651; duration=0sec 2024-11-28T09:21:37,180 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:37,180 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:37,180 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:37,182 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:37,182 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:37,182 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:37,182 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=35.2 K 2024-11-28T09:21:37,183 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d53b2e24c17c4535829e64a09d9e0098, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1732785694557 2024-11-28T09:21:37,183 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c5ee1e1845d4d55bc6796a1582e1c41, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=42, earliestPutTs=1732785694619 2024-11-28T09:21:37,183 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 77c1ac16b9ce433da956cd35c7ad7fe8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:37,195 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#156 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:37,195 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/b10aa5e6c52844d498ec068d5d57c148 is 50, key is test_row_0/C:col10/1732785695291/Put/seqid=0 2024-11-28T09:21:37,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:37,201 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128df090fbcb22740d280acfc7643ed88d5_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128df090fbcb22740d280acfc7643ed88d5_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:37,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/64a06dc61c9b4ff0948cf6d229cd2a46, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:37,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/64a06dc61c9b4ff0948cf6d229cd2a46 is 175, key is test_row_0/A:col10/1732785695800/Put/seqid=0 2024-11-28T09:21:37,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742001_1177 (size=31058) 2024-11-28T09:21:37,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742002_1178 (size=12104) 2024-11-28T09:21:37,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742003_1179 (size=30955) 2024-11-28T09:21:37,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785757250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,252 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785757251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,257 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785757255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785757256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,555 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785757554, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785757555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785757560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785757561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:37,623 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/71d1ec676a3d4b76aa414abb1e3877a2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2 2024-11-28T09:21:37,624 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/64a06dc61c9b4ff0948cf6d229cd2a46 2024-11-28T09:21:37,629 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/b10aa5e6c52844d498ec068d5d57c148 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b10aa5e6c52844d498ec068d5d57c148 2024-11-28T09:21:37,648 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into b10aa5e6c52844d498ec068d5d57c148(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:37,648 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:37,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c63812a0f67543b9a093d014e80c8d96 is 50, key is test_row_0/B:col10/1732785695800/Put/seqid=0 2024-11-28T09:21:37,651 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785696651; duration=0sec 2024-11-28T09:21:37,651 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:37,651 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:37,652 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 71d1ec676a3d4b76aa414abb1e3877a2(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:37,652 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:37,652 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785696651; duration=0sec 2024-11-28T09:21:37,652 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:37,652 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:37,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742004_1180 (size=12001) 2024-11-28T09:21:37,952 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785757950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:38,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785758058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,062 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:38,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785758059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,067 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:38,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785758067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,068 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:38,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785758067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,084 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c63812a0f67543b9a093d014e80c8d96 2024-11-28T09:21:38,098 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/30a4825ac01a4e2cbc017661982a0d88 is 50, key is test_row_0/C:col10/1732785695800/Put/seqid=0 2024-11-28T09:21:38,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742005_1181 (size=12001) 2024-11-28T09:21:38,107 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/30a4825ac01a4e2cbc017661982a0d88 2024-11-28T09:21:38,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/64a06dc61c9b4ff0948cf6d229cd2a46 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46 2024-11-28T09:21:38,122 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46, entries=150, sequenceid=79, filesize=30.2 K 2024-11-28T09:21:38,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c63812a0f67543b9a093d014e80c8d96 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96 2024-11-28T09:21:38,131 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96, entries=150, sequenceid=79, filesize=11.7 K 2024-11-28T09:21:38,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/30a4825ac01a4e2cbc017661982a0d88 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88 2024-11-28T09:21:38,141 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88, entries=150, sequenceid=79, filesize=11.7 K 2024-11-28T09:21:38,143 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1441ms, sequenceid=79, compaction requested=false 2024-11-28T09:21:38,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:38,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:38,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-28T09:21:38,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-28T09:21:38,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-28T09:21:38,148 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.6060 sec 2024-11-28T09:21:38,151 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 3.6120 sec 2024-11-28T09:21:38,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-28T09:21:38,651 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-28T09:21:38,654 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:38,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-28T09:21:38,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T09:21:38,658 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:38,659 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:38,659 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:38,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T09:21:38,812 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:38,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-28T09:21:38,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:38,813 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:38,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:38,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:38,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:38,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:38,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:38,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:38,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112895835dae24c14c6f837c2daebbdd2ebf_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785696942/Put/seqid=0 2024-11-28T09:21:38,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742006_1182 (size=12154) 2024-11-28T09:21:38,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:38,844 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112895835dae24c14c6f837c2daebbdd2ebf_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112895835dae24c14c6f837c2daebbdd2ebf_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:38,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb655df444234c53babea1370fbc0bd4, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:38,850 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb655df444234c53babea1370fbc0bd4 is 175, key is test_row_0/A:col10/1732785696942/Put/seqid=0 2024-11-28T09:21:38,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742007_1183 (size=30955) 2024-11-28T09:21:38,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T09:21:39,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:39,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:39,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,110 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785759105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785759106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785759111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,115 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785759111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785759212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785759212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,216 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785759216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,222 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785759220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T09:21:39,281 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb655df444234c53babea1370fbc0bd4 2024-11-28T09:21:39,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1d6f8adf3cef433aba3fa0153ee56251 is 50, key is test_row_0/B:col10/1732785696942/Put/seqid=0 2024-11-28T09:21:39,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742008_1184 (size=12001) 2024-11-28T09:21:39,306 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1d6f8adf3cef433aba3fa0153ee56251 2024-11-28T09:21:39,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/dc8d391059cc4eb4a325435479d7f975 is 50, key is test_row_0/C:col10/1732785696942/Put/seqid=0 2024-11-28T09:21:39,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742009_1185 (size=12001) 2024-11-28T09:21:39,362 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/dc8d391059cc4eb4a325435479d7f975 2024-11-28T09:21:39,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb655df444234c53babea1370fbc0bd4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4 2024-11-28T09:21:39,377 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4, entries=150, sequenceid=93, filesize=30.2 K 2024-11-28T09:21:39,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1d6f8adf3cef433aba3fa0153ee56251 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251 2024-11-28T09:21:39,386 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T09:21:39,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/dc8d391059cc4eb4a325435479d7f975 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975 2024-11-28T09:21:39,399 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T09:21:39,400 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 587ms, sequenceid=93, compaction requested=true 2024-11-28T09:21:39,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 
2024-11-28T09:21:39,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:39,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-28T09:21:39,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-28T09:21:39,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-28T09:21:39,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 742 msec 2024-11-28T09:21:39,405 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 750 msec 2024-11-28T09:21:39,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:39,419 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T09:21:39,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:39,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:39,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:39,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:39,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:39,421 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:39,431 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,432 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112855810024491442ee8aa888a900725b3a_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:39,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785759427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785759429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785759432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,436 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785759432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742010_1186 (size=17034) 2024-11-28T09:21:39,466 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,471 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112855810024491442ee8aa888a900725b3a_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112855810024491442ee8aa888a900725b3a_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:39,472 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/583b4e1dcf674169a786ce0288f845ac, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:39,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/583b4e1dcf674169a786ce0288f845ac is 175, key is test_row_0/A:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:39,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742011_1187 (size=48139) 2024-11-28T09:21:39,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785759533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785759533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785759536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785759537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785759736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785759737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,741 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785759740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785759744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-28T09:21:39,762 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-28T09:21:39,768 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:39,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-28T09:21:39,770 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:39,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:39,771 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:39,771 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:39,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure 
is done pid=55 2024-11-28T09:21:39,879 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/583b4e1dcf674169a786ce0288f845ac 2024-11-28T09:21:39,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/4f7c9f26ae9449c3bd0c59ae48595b69 is 50, key is test_row_0/B:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:39,922 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742012_1188 (size=12001) 2024-11-28T09:21:39,923 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:39,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:39,924 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:39,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:39,924 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/4f7c9f26ae9449c3bd0c59ae48595b69 2024-11-28T09:21:39,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:39,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1acffbefa4734a03be4ffb04fe65160f is 50, key is test_row_0/C:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:39,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742013_1189 (size=12001) 2024-11-28T09:21:39,956 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1acffbefa4734a03be4ffb04fe65160f 2024-11-28T09:21:39,964 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/583b4e1dcf674169a786ce0288f845ac as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac 2024-11-28T09:21:39,965 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:39,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785759963, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:39,966 DEBUG [Thread-767 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:21:39,970 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac, entries=250, sequenceid=121, filesize=47.0 K 2024-11-28T09:21:39,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,973 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,976 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/4f7c9f26ae9449c3bd0c59ae48595b69 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69 2024-11-28T09:21:39,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69, entries=150, sequenceid=121, filesize=11.7 K 2024-11-28T09:21:39,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,983 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1acffbefa4734a03be4ffb04fe65160f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f 2024-11-28T09:21:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,984 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,989 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f, entries=150, sequenceid=121, filesize=11.7 K 2024-11-28T09:21:39,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,990 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=40.25 KB/41220 for 
7dfd6fe000bc8ebd2fc5a572e1596e7a in 571ms, sequenceid=121, compaction requested=true 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:39,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,990 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:39,990 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:39,991 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,992 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring 
compaction algorithm has selected 4 files of size 141107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:39,992 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:39,992 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:39,992 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:39,992 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:39,993 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:39,993 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=137.8 K 2024-11-28T09:21:39,993 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:39,993 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c0c94a3ccb284f78a8a23b609b64d3f2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=47.0 K 2024-11-28T09:21:39,993 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac] 2024-11-28T09:21:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,993 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 71d1ec676a3d4b76aa414abb1e3877a2, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:39,993 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c0c94a3ccb284f78a8a23b609b64d3f2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:39,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,994 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 64a06dc61c9b4ff0948cf6d229cd2a46, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=79, 
earliestPutTs=1732785695758 2024-11-28T09:21:39,994 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c63812a0f67543b9a093d014e80c8d96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732785695758 2024-11-28T09:21:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,995 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb655df444234c53babea1370fbc0bd4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785696939 2024-11-28T09:21:39,995 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d6f8adf3cef433aba3fa0153ee56251, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785696939 2024-11-28T09:21:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,995 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 583b4e1dcf674169a786ce0288f845ac, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095 2024-11-28T09:21:39,995 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f7c9f26ae9449c3bd0c59ae48595b69, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095 2024-11-28T09:21:39,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:39,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,006 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,015 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#165 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,016 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/2d8d81e2eb9049e68a167a06e9c3fbe3 is 50, key is test_row_0/B:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,020 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,022 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:40,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,039 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411285a3236be975948d1a8ae960ba5ce37b4_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,044 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411285a3236be975948d1a8ae960ba5ce37b4_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:40,044 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285a3236be975948d1a8ae960ba5ce37b4_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742014_1190 (size=12241) 2024-11-28T09:21:40,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:40,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:40,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,057 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:40,058 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:40,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,061 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/2d8d81e2eb9049e68a167a06e9c3fbe3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2d8d81e2eb9049e68a167a06e9c3fbe3 2024-11-28T09:21:40,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742015_1191 (size=4469) 2024-11-28T09:21:40,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,070 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 
2d8d81e2eb9049e68a167a06e9c3fbe3(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:40,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:40,070 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=12, startTime=1732785699990; duration=0sec 2024-11-28T09:21:40,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:40,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:40,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:40,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:40,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,076 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,077 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:40,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56
java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:40,078 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48107 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-28T09:21:40,078 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files)
2024-11-28T09:21:40,078 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:40,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,078 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b10aa5e6c52844d498ec068d5d57c148, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=47.0 K 2024-11-28T09:21:40,079 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b10aa5e6c52844d498ec068d5d57c148, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1732785695280 2024-11-28T09:21:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:21:40,079 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 30a4825ac01a4e2cbc017661982a0d88, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732785695758
2024-11-28T09:21:40,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:40,080 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting dc8d391059cc4eb4a325435479d7f975, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785696939
2024-11-28T09:21:40,080 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1acffbefa4734a03be4ffb04fe65160f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095
2024-11-28T09:21:40,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:40,083 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#166 average throughput is 0.41 MB/second, slept 0 time(s) and total slept time is 0 ms.
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:40,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,084 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd8be3a5a9049018d23eb3c4011cf8b is 175, key is test_row_0/A:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:40,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287eb206f27b2a45b4ba776b642905f232_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785700056/Put/seqid=0 2024-11-28T09:21:40,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742016_1192 (size=31195) 2024-11-28T09:21:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:40,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,118 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#168 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:40,118 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/a462017cafc3492aa3ad24f7b20fb5b0 is 50, key is test_row_0/C:col10/1732785699095/Put/seqid=0 2024-11-28T09:21:40,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,128 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,136 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785760130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,137 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785760132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785760133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785760136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742017_1193 (size=17234) 2024-11-28T09:21:40,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742018_1194 (size=12241) 2024-11-28T09:21:40,169 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/a462017cafc3492aa3ad24f7b20fb5b0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a462017cafc3492aa3ad24f7b20fb5b0 2024-11-28T09:21:40,177 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into a462017cafc3492aa3ad24f7b20fb5b0(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:40,177 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:40,177 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=12, startTime=1732785699990; duration=0sec 2024-11-28T09:21:40,177 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:40,177 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:40,231 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:40,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785760238, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785760244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,246 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785760245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785760245, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:40,384 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,385 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,385 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,385 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T09:21:40,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-28T09:21:40,443 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785760442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785760448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,450 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785760448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,450 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785760449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,519 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd8be3a5a9049018d23eb3c4011cf8b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b 2024-11-28T09:21:40,526 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into bdd8be3a5a9049018d23eb3c4011cf8b(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:40,526 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:40,526 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=12, startTime=1732785699990; duration=0sec 2024-11-28T09:21:40,526 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:40,526 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:40,539 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:40,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,555 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:40,560 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287eb206f27b2a45b4ba776b642905f232_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287eb206f27b2a45b4ba776b642905f232_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:40,561 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e11f2d03998b41ed98af278769030a84, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:40,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e11f2d03998b41ed98af278769030a84 is 175, key is test_row_0/A:col10/1732785700056/Put/seqid=0 2024-11-28T09:21:40,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742019_1195 (size=48335) 2024-11-28T09:21:40,590 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e11f2d03998b41ed98af278769030a84 2024-11-28T09:21:40,603 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fabf405dea7942f990f231d61577e492 is 50, key is test_row_0/B:col10/1732785700056/Put/seqid=0 2024-11-28T09:21:40,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742020_1196 (size=9757) 2024-11-28T09:21:40,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fabf405dea7942f990f231d61577e492 2024-11-28T09:21:40,670 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/145084ead33646b6a4ba4f1fb4ff53e1 is 50, key is test_row_0/C:col10/1732785700056/Put/seqid=0 
2024-11-28T09:21:40,692 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,693 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,693 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:40,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742021_1197 (size=9757) 2024-11-28T09:21:40,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785760745, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,753 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785760752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785760752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,755 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:40,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785760754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,845 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:40,998 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:40,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:40,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:40,999 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,000 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:41,124 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/145084ead33646b6a4ba4f1fb4ff53e1 2024-11-28T09:21:41,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e11f2d03998b41ed98af278769030a84 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84 2024-11-28T09:21:41,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84, entries=250, sequenceid=133, filesize=47.2 K 2024-11-28T09:21:41,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fabf405dea7942f990f231d61577e492 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492 2024-11-28T09:21:41,150 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492, entries=100, sequenceid=133, filesize=9.5 K 2024-11-28T09:21:41,151 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,152 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/145084ead33646b6a4ba4f1fb4ff53e1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1 2024-11-28T09:21:41,154 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
as already flushing 2024-11-28T09:21:41,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,154 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1, entries=100, sequenceid=133, filesize=9.5 K 2024-11-28T09:21:41,159 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1103ms, sequenceid=133, compaction requested=false 2024-11-28T09:21:41,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:41,251 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:41,252 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,252 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:41,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285f7d79336e124fe0ae02d24b301c5086_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,269 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785761268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742022_1198 (size=12304) 2024-11-28T09:21:41,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,271 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:41,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785761269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785761269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785761270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,277 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285f7d79336e124fe0ae02d24b301c5086_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285f7d79336e124fe0ae02d24b301c5086_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:41,279 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3424388841854f1ba988e7db88b3fcec, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3424388841854f1ba988e7db88b3fcec is 175, key is test_row_0/A:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742023_1199 (size=31105) 2024-11-28T09:21:41,290 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3424388841854f1ba988e7db88b3fcec 2024-11-28T09:21:41,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/a77f673491374add8b1d67c3157ac18d is 50, key is test_row_0/B:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,307 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,307 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:41,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,308 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742024_1200 (size=12151) 2024-11-28T09:21:41,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785761371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785761373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785761372, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785761373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,460 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,461 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:41,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:41,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785761577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785761578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,580 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785761578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,580 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785761578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,613 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,614 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:41,614 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,614 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,725 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/a77f673491374add8b1d67c3157ac18d 2024-11-28T09:21:41,735 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/41bff7fdb32745c2b1ff945aa8b716fe is 50, key is test_row_0/C:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742025_1201 (size=12151) 2024-11-28T09:21:41,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/41bff7fdb32745c2b1ff945aa8b716fe 2024-11-28T09:21:41,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3424388841854f1ba988e7db88b3fcec as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec 2024-11-28T09:21:41,761 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec, entries=150, sequenceid=161, filesize=30.4 K 2024-11-28T09:21:41,768 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,768 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:41,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:41,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:41,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/a77f673491374add8b1d67c3157ac18d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d 2024-11-28T09:21:41,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:41,778 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d, entries=150, sequenceid=161, filesize=11.9 K 2024-11-28T09:21:41,781 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/41bff7fdb32745c2b1ff945aa8b716fe as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe 2024-11-28T09:21:41,789 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe, entries=150, sequenceid=161, filesize=11.9 K 2024-11-28T09:21:41,790 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 539ms, sequenceid=161, compaction requested=true 2024-11-28T09:21:41,790 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:41,791 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:41,791 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:41,791 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:41,792 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110635 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:41,792 DEBUG 
[RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:41,792 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,792 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=108.0 K 2024-11-28T09:21:41,793 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,793 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec] 2024-11-28T09:21:41,793 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:41,793 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:41,793 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:41,793 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2d8d81e2eb9049e68a167a06e9c3fbe3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=33.3 K 2024-11-28T09:21:41,793 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdd8be3a5a9049018d23eb3c4011cf8b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095 2024-11-28T09:21:41,794 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d8d81e2eb9049e68a167a06e9c3fbe3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095 2024-11-28T09:21:41,794 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e11f2d03998b41ed98af278769030a84, keycount=250, bloomtype=ROW, size=47.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785699429 2024-11-28T09:21:41,794 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fabf405dea7942f990f231d61577e492, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785700054 2024-11-28T09:21:41,794 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3424388841854f1ba988e7db88b3fcec, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:41,794 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a77f673491374add8b1d67c3157ac18d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:41,815 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,830 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#175 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:41,830 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/77ee0aae888248079b3973bfd93c4592 is 50, key is test_row_0/B:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,836 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112812d48b6201d346579d2a1dde0b8c00fb_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,838 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112812d48b6201d346579d2a1dde0b8c00fb_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,839 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112812d48b6201d346579d2a1dde0b8c00fb_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742026_1202 (size=12493) 2024-11-28T09:21:41,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:41,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/77ee0aae888248079b3973bfd93c4592 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/77ee0aae888248079b3973bfd93c4592 2024-11-28T09:21:41,886 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 77ee0aae888248079b3973bfd93c4592(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
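[Editor's note, not part of the captured log] The recurring "Over memstore limit=512.0 K" rejections above correspond to the per-region blocking threshold, which HBase derives from the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4). A 512 K limit therefore suggests this test runs with a deliberately tiny flush size on the order of 128 K; the sketch below shows how such a limit could be configured, with the concrete values being assumptions rather than anything read from the log.

    // Sketch only: a configuration that would yield a 512 K per-region blocking limit.
    // The 128 K flush size is an assumption inferred from the log, not taken from it.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfigSketch {
      public static Configuration create() {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K flush size
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        return conf;                                                   // 128 K * 4 = 512 K blocking limit
      }
    }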
2024-11-28T09:21:41,886 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:41,886 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785701791; duration=0sec 2024-11-28T09:21:41,886 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:41,886 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:41,886 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:41,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:41,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:41,888 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:41,889 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34149 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:41,889 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:41,889 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:41,889 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a462017cafc3492aa3ad24f7b20fb5b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=33.3 K 2024-11-28T09:21:41,889 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a462017cafc3492aa3ad24f7b20fb5b0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732785699095 2024-11-28T09:21:41,890 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 145084ead33646b6a4ba4f1fb4ff53e1, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785700054 2024-11-28T09:21:41,890 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 41bff7fdb32745c2b1ff945aa8b716fe, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:41,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742027_1203 (size=4469) 2024-11-28T09:21:41,907 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#174 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:41,907 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb21893ed82743429364bbc28c630a44 is 175, key is test_row_0/A:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,916 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#176 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:41,916 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/463fd18b1d374bae937478c218997108 is 50, key is test_row_0/C:col10/1732785700126/Put/seqid=0 2024-11-28T09:21:41,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785761917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,921 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,921 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785761918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,922 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:41,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785761918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:41,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:41,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:41,922 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
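[Editor's note, not part of the captured log] The pid=56 entries above are a master-driven flush procedure: the master dispatches FlushRegionCallable to the region server, the server rejects it with "Unable to complete flush" because its own flush of the region is still in progress, and the master re-dispatches the procedure, which is why the same pid keeps reappearing. The equivalent flush can be requested explicitly through the public Admin API; a minimal sketch follows, with connection details assumed.

    // Sketch only: requesting a table flush through the public Admin API.
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(TableName.valueOf("TestAcidGuarantees")); // flush all regions of the table
        }
      }
    }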
2024-11-28T09:21:41,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:41,924 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:41,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785761921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:41,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128be365341446841f9a74ebb24a4f6e343_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785701887/Put/seqid=0 2024-11-28T09:21:41,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742028_1204 (size=31447) 2024-11-28T09:21:41,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742029_1205 (size=12493) 2024-11-28T09:21:41,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742030_1206 (size=12304) 2024-11-28T09:21:41,964 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:41,969 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128be365341446841f9a74ebb24a4f6e343_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128be365341446841f9a74ebb24a4f6e343_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:41,970 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd3f3044fd44d1586ccb52e15789b94, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:41,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd3f3044fd44d1586ccb52e15789b94 is 175, key is test_row_0/A:col10/1732785701887/Put/seqid=0 2024-11-28T09:21:41,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742031_1207 
(size=31105) 2024-11-28T09:21:41,978 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=176, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd3f3044fd44d1586ccb52e15789b94 2024-11-28T09:21:41,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/23fd07e16a714fa6a6ed5ac99bc75a71 is 50, key is test_row_0/B:col10/1732785701887/Put/seqid=0 2024-11-28T09:21:42,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742032_1208 (size=12151) 2024-11-28T09:21:42,002 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/23fd07e16a714fa6a6ed5ac99bc75a71 2024-11-28T09:21:42,016 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/b578ef462fb34230b6ccd7ade237150b is 50, key is test_row_0/C:col10/1732785701887/Put/seqid=0 2024-11-28T09:21:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785762022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785762023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785762023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,027 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785762025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742033_1209 (size=12151) 2024-11-28T09:21:42,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
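The rejected Mutate calls above all fail with RegionTooBusyException because the region's memstore has hit its blocking limit (512.0 K) while the flush for pid=56 is still in flight; the server sheds write load until that flush completes. A caller driving writes directly (rather than through the test harness) could absorb these rejections with a bounded retry, roughly as in the sketch below. The table, family, qualifier and row names are copied from the log purely for illustration, and the retry loop is an assumption about how one might handle the rejection, not part of TestAcidGuarantees.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                int attempts = 0;
                while (true) {
                    try {
                        table.put(put);   // rejected while the memstore is over its blocking limit
                        break;
                    } catch (IOException e) {
                        // RegionTooBusyException (possibly wrapped by the client's own retry layer)
                        // lands here; back off briefly and retry a bounded number of times.
                        if (++attempts >= 5) {
                            throw e;
                        }
                        Thread.sleep(200L * attempts);
                    }
                }
            }
        }
    }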
2024-11-28T09:21:42,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,226 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785762225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,229 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,229 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785762226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,230 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785762229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785762229, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,231 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,232 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:42,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,345 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eb21893ed82743429364bbc28c630a44 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44 2024-11-28T09:21:42,352 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into eb21893ed82743429364bbc28c630a44(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:42,352 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:42,352 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785701790; duration=0sec 2024-11-28T09:21:42,352 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:42,352 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:42,357 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/463fd18b1d374bae937478c218997108 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/463fd18b1d374bae937478c218997108 2024-11-28T09:21:42,362 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 463fd18b1d374bae937478c218997108(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
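The shortCompactions thread has just folded the three A-family files into eb21893ed82743429364bbc28c630a44 (30.7 K) and the longCompactions thread is committing the C-family result; each then clears the under-compaction mark for its store. Outside the test, the same kind of compaction can be requested and watched through the public Admin API, roughly as sketched below; the polling loop and class name are illustrative assumptions, not how TestAcidGuarantees drives compactions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionWatchExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                admin.majorCompact(table);  // the request is queued; region servers compact asynchronously
                // Poll until the CompactSplit queues (the short/longCompactions threads above) drain.
                while (admin.getCompactionState(table) != CompactionState.NONE) {
                    Thread.sleep(500);
                }
            }
        }
    }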
2024-11-28T09:21:42,362 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:42,362 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785701791; duration=0sec 2024-11-28T09:21:42,362 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:42,362 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:42,385 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,386 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,386 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:42,386 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,439 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=176 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/b578ef462fb34230b6ccd7ade237150b 2024-11-28T09:21:42,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/bdd3f3044fd44d1586ccb52e15789b94 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94 2024-11-28T09:21:42,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94, entries=150, sequenceid=176, filesize=30.4 K 2024-11-28T09:21:42,454 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/23fd07e16a714fa6a6ed5ac99bc75a71 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71 2024-11-28T09:21:42,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71, entries=150, sequenceid=176, filesize=11.9 K 2024-11-28T09:21:42,463 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/b578ef462fb34230b6ccd7ade237150b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b 2024-11-28T09:21:42,469 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b, entries=150, sequenceid=176, filesize=11.9 K 2024-11-28T09:21:42,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 587ms, sequenceid=176, compaction requested=false 2024-11-28T09:21:42,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:42,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 
7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:42,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:21:42,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:42,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:42,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:42,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:42,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:42,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:42,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,541 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
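With the previous flush finished (~73.80 KB written at sequenceid=176), the region immediately requests another flush at ~134.18 KB across the three column families, while client writes keep bouncing off the 512.0 K blocking limit. That limit is the region's memstore flush size multiplied by the block multiplier, so the test setup has evidently lowered the flush size far below the 128 MB default; the exact values are not visible in this log. The sketch below shows one combination that would reproduce a 512 KB blocking limit and is an assumption for illustration only.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches this many bytes (cluster default: 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Block new updates once the memstore reaches flush.size * multiplier;
            // 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" rejections above.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            // A real cluster would pick these up from hbase-site.xml; setting them in code is illustrative.
        }
    }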
2024-11-28T09:21:42,541 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284a731bd733934238b912325f1c64bc77_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:42,553 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785762548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785762549, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785762550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,558 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785762553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742034_1210 (size=12304) 2024-11-28T09:21:42,611 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:42,616 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284a731bd733934238b912325f1c64bc77_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284a731bd733934238b912325f1c64bc77_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:42,617 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eec067f126e24c849982a04c62e7f605, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:42,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eec067f126e24c849982a04c62e7f605 is 175, key is test_row_0/A:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:42,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785762655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785762655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,657 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785762655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785762659, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742035_1211 (size=31105) 2024-11-28T09:21:42,693 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,694 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,695 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,849 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:42,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:42,851 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,851 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:42,861 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785762858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785762859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,864 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785762859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:42,865 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:42,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785762861, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,004 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,004 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,083 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=201, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eec067f126e24c849982a04c62e7f605 2024-11-28T09:21:43,094 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/38a88dd4b26742468ab0b072787b52c8 is 50, key is test_row_0/B:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:43,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742036_1212 (size=12151) 2024-11-28T09:21:43,156 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,158 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785763163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785763167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,171 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785763168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785763168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,312 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,313 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,313 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,313 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:43,314 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,466 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:43,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/38a88dd4b26742468ab0b072787b52c8 2024-11-28T09:21:43,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/c87c75507c7749d783e8ce112dc6a0eb is 50, key is test_row_0/C:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:43,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742037_1213 (size=12151) 2024-11-28T09:21:43,619 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,619 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,620 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:43,620 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,667 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785763667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785763673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,675 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785763675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:43,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785763676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,772 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,773 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,773 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:43,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:43,926 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:43,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:43,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=201 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/c87c75507c7749d783e8ce112dc6a0eb 2024-11-28T09:21:43,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/eec067f126e24c849982a04c62e7f605 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605 2024-11-28T09:21:43,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605, entries=150, sequenceid=201, filesize=30.4 K 2024-11-28T09:21:43,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/38a88dd4b26742468ab0b072787b52c8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8 2024-11-28T09:21:43,958 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8, entries=150, sequenceid=201, filesize=11.9 K 2024-11-28T09:21:43,959 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/c87c75507c7749d783e8ce112dc6a0eb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb 2024-11-28T09:21:43,964 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb, entries=150, sequenceid=201, filesize=11.9 K 2024-11-28T09:21:43,968 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1437ms, sequenceid=201, compaction requested=true 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:43,968 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:43,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:43,969 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:43,969 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:43,972 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:43,972 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:43,972 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:43,972 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:43,972 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,972 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:43,973 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/77ee0aae888248079b3973bfd93c4592, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=35.9 K 2024-11-28T09:21:43,973 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=91.5 K 2024-11-28T09:21:43,973 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:43,973 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605] 2024-11-28T09:21:43,973 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 77ee0aae888248079b3973bfd93c4592, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:43,973 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb21893ed82743429364bbc28c630a44, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:43,974 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bdd3f3044fd44d1586ccb52e15789b94, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732785701267 2024-11-28T09:21:43,974 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 23fd07e16a714fa6a6ed5ac99bc75a71, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732785701267 2024-11-28T09:21:43,974 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting eec067f126e24c849982a04c62e7f605, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:43,974 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 38a88dd4b26742468ab0b072787b52c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:43,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:43,984 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:44,002 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized 
enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,009 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112888456b8f8ef042909b70fb62eefc4447_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,011 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#184 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:44,012 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/a0db175e655d459d8019c831a6834362 is 50, key is test_row_0/B:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:44,012 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112888456b8f8ef042909b70fb62eefc4447_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,012 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112888456b8f8ef042909b70fb62eefc4447_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280f613b5d84184a07954add463d2e5ed4_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785702552/Put/seqid=0 2024-11-28T09:21:44,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742038_1214 (size=4469) 2024-11-28T09:21:44,050 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#183 average throughput is 0.51 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:44,051 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3f50cdb5395c4c9c8d43bf2edf503013 is 175, key is test_row_0/A:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:44,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785764056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:44,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] handler.RSProcedureHandler(58): pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:44,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=56 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:44,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742039_1215 (size=12595) 2024-11-28T09:21:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=56 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:44,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742040_1216 (size=14794) 2024-11-28T09:21:44,096 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:44,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742041_1217 (size=31549) 2024-11-28T09:21:44,103 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280f613b5d84184a07954add463d2e5ed4_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280f613b5d84184a07954add463d2e5ed4_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:44,104 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/d1174b57b40b407698e21076bcc8f71d, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,105 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/d1174b57b40b407698e21076bcc8f71d is 175, key is test_row_0/A:col10/1732785702552/Put/seqid=0 2024-11-28T09:21:44,111 DEBUG 
[RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3f50cdb5395c4c9c8d43bf2edf503013 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013 2024-11-28T09:21:44,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742042_1218 (size=39749) 2024-11-28T09:21:44,118 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=215, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/d1174b57b40b407698e21076bcc8f71d 2024-11-28T09:21:44,120 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 3f50cdb5395c4c9c8d43bf2edf503013(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:44,120 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:44,120 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785703968; duration=0sec 2024-11-28T09:21:44,120 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:44,120 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:44,120 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:44,122 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:44,122 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:44,122 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:44,123 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/463fd18b1d374bae937478c218997108, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=35.9 K 2024-11-28T09:21:44,123 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 463fd18b1d374bae937478c218997108, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732785700126 2024-11-28T09:21:44,128 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b578ef462fb34230b6ccd7ade237150b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=176, earliestPutTs=1732785701267 2024-11-28T09:21:44,130 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c87c75507c7749d783e8ce112dc6a0eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:44,134 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d7905fb379b14d56b4a44a94bcd8d425 is 50, key is test_row_0/B:col10/1732785702552/Put/seqid=0 2024-11-28T09:21:44,142 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#187 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:44,143 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/88c942143bfc41bbbbb24b65129892e9 is 50, key is test_row_0/C:col10/1732785701919/Put/seqid=0 2024-11-28T09:21:44,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742044_1220 (size=12595) 2024-11-28T09:21:44,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742043_1219 (size=12151) 2024-11-28T09:21:44,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d7905fb379b14d56b4a44a94bcd8d425 2024-11-28T09:21:44,166 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/88c942143bfc41bbbbb24b65129892e9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/88c942143bfc41bbbbb24b65129892e9 2024-11-28T09:21:44,167 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785764161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,173 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 88c942143bfc41bbbbb24b65129892e9(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:44,173 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:44,173 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785703968; duration=0sec 2024-11-28T09:21:44,173 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:44,173 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:44,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/2b18b563a4d2440fb9667bf7fb6aa16f is 50, key is test_row_0/C:col10/1732785702552/Put/seqid=0 2024-11-28T09:21:44,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742045_1221 (size=12151) 2024-11-28T09:21:44,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/2b18b563a4d2440fb9667bf7fb6aa16f 2024-11-28T09:21:44,187 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/d1174b57b40b407698e21076bcc8f71d as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d 2024-11-28T09:21:44,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d, entries=200, sequenceid=215, filesize=38.8 K 2024-11-28T09:21:44,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d7905fb379b14d56b4a44a94bcd8d425 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425 2024-11-28T09:21:44,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425, entries=150, sequenceid=215, filesize=11.9 K 2024-11-28T09:21:44,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/2b18b563a4d2440fb9667bf7fb6aa16f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f 2024-11-28T09:21:44,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f, entries=150, sequenceid=215, filesize=11.9 K 2024-11-28T09:21:44,208 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 223ms, sequenceid=215, compaction requested=false 2024-11-28T09:21:44,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:44,233 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-28T09:21:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:44,234 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:21:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:44,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:44,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:44,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:44,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:44,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:44,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ffff4d69422e4721bdd63720718b43b8_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785704046/Put/seqid=0 2024-11-28T09:21:44,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742046_1222 (size=12304) 2024-11-28T09:21:44,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:44,279 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ffff4d69422e4721bdd63720718b43b8_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ffff4d69422e4721bdd63720718b43b8_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:44,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/15d4fcf7089a4e489cefb699f391492c, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:44,281 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/15d4fcf7089a4e489cefb699f391492c is 175, key is test_row_0/A:col10/1732785704046/Put/seqid=0 2024-11-28T09:21:44,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742047_1223 (size=31105) 2024-11-28T09:21:44,287 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=239, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/15d4fcf7089a4e489cefb699f391492c 2024-11-28T09:21:44,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3749cffa20604a858808acdcfa7b8e90 is 50, key is test_row_0/B:col10/1732785704046/Put/seqid=0 2024-11-28T09:21:44,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742048_1224 (size=12151) 2024-11-28T09:21:44,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:44,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:44,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785764410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,488 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/a0db175e655d459d8019c831a6834362 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a0db175e655d459d8019c831a6834362 2024-11-28T09:21:44,493 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into a0db175e655d459d8019c831a6834362(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:44,493 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:44,494 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785703968; duration=0sec 2024-11-28T09:21:44,494 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:44,494 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:44,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785764514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785764676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,678 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785764676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785764678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785764679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,704 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3749cffa20604a858808acdcfa7b8e90 2024-11-28T09:21:44,713 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/56269bfe51b14510bd3eaa82bdc3946c is 50, key is test_row_0/C:col10/1732785704046/Put/seqid=0 2024-11-28T09:21:44,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742049_1225 (size=12151) 2024-11-28T09:21:44,717 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=239 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/56269bfe51b14510bd3eaa82bdc3946c 2024-11-28T09:21:44,718 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:44,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785764717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:44,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/15d4fcf7089a4e489cefb699f391492c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c 2024-11-28T09:21:44,733 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c, entries=150, sequenceid=239, filesize=30.4 K 2024-11-28T09:21:44,734 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3749cffa20604a858808acdcfa7b8e90 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90 2024-11-28T09:21:44,739 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90, 
entries=150, sequenceid=239, filesize=11.9 K 2024-11-28T09:21:44,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/56269bfe51b14510bd3eaa82bdc3946c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c 2024-11-28T09:21:44,744 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c, entries=150, sequenceid=239, filesize=11.9 K 2024-11-28T09:21:44,746 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 511ms, sequenceid=239, compaction requested=true 2024-11-28T09:21:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:44,746 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-28T09:21:44,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-28T09:21:44,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-28T09:21:44,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.9760 sec 2024-11-28T09:21:44,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 4.9820 sec 2024-11-28T09:21:45,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:45,024 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:45,025 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,067 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128bb2749a789f54bfc877ea58a86fff8d7_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:45,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742050_1226 (size=14794) 2024-11-28T09:21:45,097 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:45,110 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128bb2749a789f54bfc877ea58a86fff8d7_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bb2749a789f54bfc877ea58a86fff8d7_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:45,111 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/c4cc41189a924046b3c16feb55668b72, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:45,113 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/c4cc41189a924046b3c16feb55668b72 is 175, key is test_row_0/A:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:45,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:45,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785765119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:45,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742051_1227 (size=39749) 2024-11-28T09:21:45,164 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=255, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/c4cc41189a924046b3c16feb55668b72 2024-11-28T09:21:45,175 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fdd02c131fdd4664b167cc76f719e602 is 50, key is test_row_0/B:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:45,198 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742052_1228 (size=12151) 2024-11-28T09:21:45,199 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fdd02c131fdd4664b167cc76f719e602 2024-11-28T09:21:45,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:45,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785765224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:45,233 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/29be1921acf24f9ab216d93d23e02090 is 50, key is test_row_0/C:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:45,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742053_1229 (size=12151) 2024-11-28T09:21:45,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:45,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785765427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:45,672 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=255 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/29be1921acf24f9ab216d93d23e02090 2024-11-28T09:21:45,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/c4cc41189a924046b3c16feb55668b72 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72 2024-11-28T09:21:45,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72, entries=200, sequenceid=255, filesize=38.8 K 2024-11-28T09:21:45,684 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/fdd02c131fdd4664b167cc76f719e602 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602 2024-11-28T09:21:45,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602, entries=150, sequenceid=255, filesize=11.9 K 2024-11-28T09:21:45,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/29be1921acf24f9ab216d93d23e02090 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090 2024-11-28T09:21:45,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090, entries=150, sequenceid=255, filesize=11.9 K 2024-11-28T09:21:45,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 668ms, sequenceid=255, compaction requested=true 2024-11-28T09:21:45,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:45,693 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:45,693 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:45,693 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:45,694 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142152 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:45,694 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:45,694 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:45,694 DEBUG 
[RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:45,694 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:45,694 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:45,694 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=138.8 K 2024-11-28T09:21:45,694 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a0db175e655d459d8019c831a6834362, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=47.9 K 2024-11-28T09:21:45,694 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:45,694 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72] 2024-11-28T09:21:45,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a0db175e655d459d8019c831a6834362, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:45,695 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f50cdb5395c4c9c8d43bf2edf503013, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:45,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d7905fb379b14d56b4a44a94bcd8d425, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785702549 2024-11-28T09:21:45,695 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d1174b57b40b407698e21076bcc8f71d, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785702546 2024-11-28T09:21:45,696 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3749cffa20604a858808acdcfa7b8e90, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732785704037 2024-11-28T09:21:45,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 15d4fcf7089a4e489cefb699f391492c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732785704037 2024-11-28T09:21:45,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4cc41189a924046b3c16feb55668b72, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704383 2024-11-28T09:21:45,696 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fdd02c131fdd4664b167cc76f719e602, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704405 2024-11-28T09:21:45,707 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#195 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:45,707 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d1d20a4ff2c94714b3bc7f7c2a598f7a is 50, key is test_row_0/B:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:45,709 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:45,712 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411285538d5d2cbaf4568a340093f1e63322c_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:45,713 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411285538d5d2cbaf4568a340093f1e63322c_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:45,714 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285538d5d2cbaf4568a340093f1e63322c_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:45,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742054_1230 (size=12731) 2024-11-28T09:21:45,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742055_1231 (size=4469) 2024-11-28T09:21:45,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:45,732 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:21:45,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:45,733 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:45,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285a0669b935974ef3a69508668433d7ab_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785705111/Put/seqid=0 2024-11-28T09:21:45,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742056_1232 (size=14994) 2024-11-28T09:21:45,768 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:45,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785765765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:45,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:45,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785765870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785766072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,111 INFO [master/363d8d38a970:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-28T09:21:46,111 INFO [master/363d8d38a970:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-28T09:21:46,121 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d1d20a4ff2c94714b3bc7f7c2a598f7a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d1d20a4ff2c94714b3bc7f7c2a598f7a 2024-11-28T09:21:46,123 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#196 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:46,124 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/51c21f2dd3594b078c9ed212cd1c4111 is 175, key is test_row_0/A:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:46,129 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into d1d20a4ff2c94714b3bc7f7c2a598f7a(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:46,129 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:46,129 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=12, startTime=1732785705693; duration=0sec 2024-11-28T09:21:46,129 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:46,129 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:46,129 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:46,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742057_1233 (size=31685) 2024-11-28T09:21:46,131 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:46,132 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:46,132 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:46,132 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/88c942143bfc41bbbbb24b65129892e9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=47.9 K 2024-11-28T09:21:46,133 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 88c942143bfc41bbbbb24b65129892e9, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=201, earliestPutTs=1732785701915 2024-11-28T09:21:46,133 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2b18b563a4d2440fb9667bf7fb6aa16f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785702549 2024-11-28T09:21:46,134 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 56269bfe51b14510bd3eaa82bdc3946c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=239, earliestPutTs=1732785704037 2024-11-28T09:21:46,136 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 29be1921acf24f9ab216d93d23e02090, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704405 2024-11-28T09:21:46,138 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/51c21f2dd3594b078c9ed212cd1c4111 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111 2024-11-28T09:21:46,144 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 51c21f2dd3594b078c9ed212cd1c4111(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:46,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:46,144 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=12, startTime=1732785705693; duration=0sec 2024-11-28T09:21:46,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:46,144 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:46,149 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#198 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:46,149 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/5f946c37335249edb71753611e995f21 is 50, key is test_row_0/C:col10/1732785704405/Put/seqid=0 2024-11-28T09:21:46,152 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:46,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742058_1234 (size=12731) 2024-11-28T09:21:46,161 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285a0669b935974ef3a69508668433d7ab_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285a0669b935974ef3a69508668433d7ab_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:46,162 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7515824bad4e478e9671f5dfde51f526, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:46,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7515824bad4e478e9671f5dfde51f526 is 175, key is test_row_0/A:col10/1732785705111/Put/seqid=0 2024-11-28T09:21:46,165 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/5f946c37335249edb71753611e995f21 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f946c37335249edb71753611e995f21 2024-11-28T09:21:46,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742059_1235 (size=39949) 2024-11-28T09:21:46,170 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7515824bad4e478e9671f5dfde51f526 2024-11-28T09:21:46,172 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 5f946c37335249edb71753611e995f21(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:46,172 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:46,172 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=12, startTime=1732785705693; duration=0sec 2024-11-28T09:21:46,172 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:46,172 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:46,178 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/47c3babf7dde4f2291f8eef990cdac96 is 50, key is test_row_0/B:col10/1732785705111/Put/seqid=0 2024-11-28T09:21:46,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742060_1236 (size=12301) 2024-11-28T09:21:46,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785766374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/47c3babf7dde4f2291f8eef990cdac96 2024-11-28T09:21:46,592 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/9db3db4a27734465a1f68c003db62b32 is 50, key is test_row_0/C:col10/1732785705111/Put/seqid=0 2024-11-28T09:21:46,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742061_1237 (size=12301) 2024-11-28T09:21:46,599 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/9db3db4a27734465a1f68c003db62b32 2024-11-28T09:21:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7515824bad4e478e9671f5dfde51f526 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526 2024-11-28T09:21:46,608 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526, entries=200, sequenceid=277, filesize=39.0 K 2024-11-28T09:21:46,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/47c3babf7dde4f2291f8eef990cdac96 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96 2024-11-28T09:21:46,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T09:21:46,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/9db3db4a27734465a1f68c003db62b32 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32 2024-11-28T09:21:46,618 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T09:21:46,619 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 887ms, sequenceid=277, compaction requested=false 2024-11-28T09:21:46,619 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:46,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:46,685 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:21:46,685 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:46,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:46,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:46,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:46,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:46,686 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:46,700 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112885ec8c4b1a114b80912b336108932fa8_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:46,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742062_1238 (size=14994) 2024-11-28T09:21:46,749 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785766713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,752 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785766749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785766749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,753 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785766749, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785766850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785766853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,856 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785766855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785766855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:46,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:46,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785766876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,056 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785767056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785767056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785767057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,061 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785767058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,111 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:47,117 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112885ec8c4b1a114b80912b336108932fa8_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885ec8c4b1a114b80912b336108932fa8_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:47,119 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/a52e7c7548924113b34f82844d486152, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:47,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/a52e7c7548924113b34f82844d486152 is 175, key is test_row_0/A:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:47,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742063_1239 (size=39949) 2024-11-28T09:21:47,360 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785767359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,362 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785767361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785767363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,364 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785767363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,526 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/a52e7c7548924113b34f82844d486152 2024-11-28T09:21:47,535 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1f726d9518c649129c10b98236b41f30 is 50, key is test_row_0/B:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:47,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742064_1240 (size=12301) 2024-11-28T09:21:47,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785767862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,864 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785767863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785767868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785767869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:47,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785767883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:47,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-28T09:21:47,902 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-28T09:21:47,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:47,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-28T09:21:47,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T09:21:47,907 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:47,908 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:47,908 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:47,941 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=295 
(bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1f726d9518c649129c10b98236b41f30 2024-11-28T09:21:47,953 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/5f940e5e08c948fea63c423c13fad834 is 50, key is test_row_0/C:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:47,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742065_1241 (size=12301) 2024-11-28T09:21:47,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/5f940e5e08c948fea63c423c13fad834 2024-11-28T09:21:47,973 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/a52e7c7548924113b34f82844d486152 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152 2024-11-28T09:21:47,982 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152, entries=200, sequenceid=295, filesize=39.0 K 2024-11-28T09:21:47,988 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/1f726d9518c649129c10b98236b41f30 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30 2024-11-28T09:21:47,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30, entries=150, sequenceid=295, filesize=12.0 K 2024-11-28T09:21:47,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/5f940e5e08c948fea63c423c13fad834 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834 2024-11-28T09:21:48,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834, entries=150, sequenceid=295, filesize=12.0 K 2024-11-28T09:21:48,003 INFO 
[MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1318ms, sequenceid=295, compaction requested=true 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:48,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:48,003 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:48,003 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:48,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:48,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:48,007 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:48,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:48,007 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:48,007 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:48,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T09:21:48,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=109.0 K 2024-11-28T09:21:48,007 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d1d20a4ff2c94714b3bc7f7c2a598f7a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.5 K 2024-11-28T09:21:48,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:48,008 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152] 2024-11-28T09:21:48,008 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d1d20a4ff2c94714b3bc7f7c2a598f7a, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704405 2024-11-28T09:21:48,008 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 51c21f2dd3594b078c9ed212cd1c4111, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704405 2024-11-28T09:21:48,008 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 47c3babf7dde4f2291f8eef990cdac96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785705111 2024-11-28T09:21:48,009 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f726d9518c649129c10b98236b41f30, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:48,009 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7515824bad4e478e9671f5dfde51f526, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785705093 2024-11-28T09:21:48,009 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a52e7c7548924113b34f82844d486152, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:48,026 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#204 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:48,027 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/bdce3fef871b422d9f519a19bebdb0a4 is 50, key is test_row_0/B:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:48,041 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:48,055 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411284e87294356c040e1968bea71aac5515c_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:48,057 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411284e87294356c040e1968bea71aac5515c_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:48,058 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284e87294356c040e1968bea71aac5515c_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:48,060 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:48,061 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-28T09:21:48,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:48,061 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:48,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742066_1242 (size=12983) 2024-11-28T09:21:48,076 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/bdce3fef871b422d9f519a19bebdb0a4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/bdce3fef871b422d9f519a19bebdb0a4 2024-11-28T09:21:48,085 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into bdce3fef871b422d9f519a19bebdb0a4(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:48,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:48,085 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785708003; duration=0sec 2024-11-28T09:21:48,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:48,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:48,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:48,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:48,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:48,087 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:48,087 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f946c37335249edb71753611e995f21, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.5 K 2024-11-28T09:21:48,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f946c37335249edb71753611e995f21, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=255, earliestPutTs=1732785704405 2024-11-28T09:21:48,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9db3db4a27734465a1f68c003db62b32, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785705111 2024-11-28T09:21:48,089 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f940e5e08c948fea63c423c13fad834, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:48,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 
is added to blk_1073742067_1243 (size=4469) 2024-11-28T09:21:48,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283f1ecfc2ae934c2a8bed2c025013dd56_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785706712/Put/seqid=0 2024-11-28T09:21:48,122 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#207 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:48,123 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/78850dcd4b514aae8814f4e4674f3bca is 50, key is test_row_0/C:col10/1732785706684/Put/seqid=0 2024-11-28T09:21:48,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742068_1244 (size=12454) 2024-11-28T09:21:48,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,158 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283f1ecfc2ae934c2a8bed2c025013dd56_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283f1ecfc2ae934c2a8bed2c025013dd56_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:48,159 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b40fa953c50044da961651df39deecb9, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:48,160 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b40fa953c50044da961651df39deecb9 is 175, key is test_row_0/A:col10/1732785706712/Put/seqid=0 2024-11-28T09:21:48,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742069_1245 (size=12983) 2024-11-28T09:21:48,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742070_1246 (size=31255) 2024-11-28T09:21:48,195 INFO 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=317, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b40fa953c50044da961651df39deecb9 2024-11-28T09:21:48,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3c01bd2dedc04d5eab68bdd9868e7aad is 50, key is test_row_0/B:col10/1732785706712/Put/seqid=0 2024-11-28T09:21:48,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-28T09:21:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742071_1247 (size=12301) 2024-11-28T09:21:48,224 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3c01bd2dedc04d5eab68bdd9868e7aad 2024-11-28T09:21:48,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/f0d218e4b44c4a7cbc6a970821829e46 is 50, key is test_row_0/C:col10/1732785706712/Put/seqid=0 2024-11-28T09:21:48,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742072_1248 (size=12301) 2024-11-28T09:21:48,253 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=317 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/f0d218e4b44c4a7cbc6a970821829e46 2024-11-28T09:21:48,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/b40fa953c50044da961651df39deecb9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9 2024-11-28T09:21:48,269 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9, entries=150, sequenceid=317, filesize=30.5 K 
2024-11-28T09:21:48,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/3c01bd2dedc04d5eab68bdd9868e7aad as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad 2024-11-28T09:21:48,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,276 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad, entries=150, sequenceid=317, filesize=12.0 K 2024-11-28T09:21:48,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 
{event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/f0d218e4b44c4a7cbc6a970821829e46 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46
[... repeated DEBUG entries elided, 2024-11-28T09:21:48,278 to 09:21:48,287: [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-11-28T09:21:48,287 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46, entries=150, sequenceid=317, filesize=12.0 K
[... same repeated StoreFileTrackerFactory DEBUG entries elided, 09:21:48,287 to 09:21:48,288 ...]
2024-11-28T09:21:48,288 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 227ms, sequenceid=317, compaction requested=false
2024-11-28T09:21:48,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a:
2024-11-28T09:21:48,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
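The repeated StoreFileTrackerFactory entries above show every store-file operation resolving the tracker implementation to DefaultStoreFileTracker. As a rough illustration only (the property key "hbase.store.file-tracker.impl" and the values DEFAULT/FILE come from the HBase store file tracking feature and should be verified against the HBase version in use; the table and family names simply mirror the log), the tracker can be pinned per table when it is created:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class StoreFileTrackerExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Assumed property key; "DEFAULT" matches the DefaultStoreFileTracker
            // logged above, "FILE" would select the file-based tracker instead.
            TableDescriptorBuilder builder =
                TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setValue("hbase.store.file-tracker.impl", "DEFAULT")
                    .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
            admin.createTable(builder.build());
        }
    }
}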
2024-11-28T09:21:48,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:48,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58
[... same repeated StoreFileTrackerFactory DEBUG entries elided, 09:21:48,289 ...]
2024-11-28T09:21:48,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=58
[... same repeated StoreFileTrackerFactory DEBUG entries elided, 09:21:48,290 to 09:21:48,292 ...]
2024-11-28T09:21:48,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57
2024-11-28T09:21:48,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 382 msec
[... same repeated StoreFileTrackerFactory DEBUG entries elided, 09:21:48,292 to 09:21:48,293 ...]
2024-11-28T09:21:48,294 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 389 msec
[... same repeated StoreFileTrackerFactory DEBUG entries elided, 09:21:48,294 to 09:21:48,296 ...]
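The pid=57/pid=58 entries above record a master-driven table flush completing: a FlushTableProcedure with one FlushRegionProcedure child for the single region of TestAcidGuarantees. A minimal client-side sketch of the call that requests such a flush is shown below; it assumes an HBase client on the classpath and a reachable cluster, and in releases where Admin.flush is procedure-based it is this request that the master turns into the procedures logged here.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Ask for a flush of every region of the table; the region servers then
            // write the memstore out as new store files (the "Added ... entries=150,
            // sequenceid=317" entry above) and report completion back to the master.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}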
[... repeated DEBUG entries elided, 2024-11-28T09:21:48,296 to 09:21:48,367: [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker ...]
2024-11-28T09:21:48,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57
2024-11-28T09:21:48,509 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed
2024-11-28T09:21:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,511 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}]
master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T09:21:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,513 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#205 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-28T09:21:48,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,514 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/ed3c405b354748c18fb2878c02d33dde is 175, key is test_row_0/A:col10/1732785706684/Put/seqid=0
2024-11-28T09:21:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees
2024-11-28T09:21:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
2024-11-28T09:21:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,518 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T09:21:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,519 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T09:21:48,519 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-28T09:21:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,522
DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,526 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,531 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,535 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,541 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,545 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,551 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742073_1249 (size=31937) 2024-11-28T09:21:48,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,563 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/ed3c405b354748c18fb2878c02d33dde as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde 2024-11-28T09:21:48,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,569 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,570 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into ed3c405b354748c18fb2878c02d33dde(size=31.2 K), total size for store is 61.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:48,570 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:48,570 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785708003; duration=0sec 2024-11-28T09:21:48,570 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:48,570 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,571 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,577 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,580 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,587 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,589 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/78850dcd4b514aae8814f4e4674f3bca as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/78850dcd4b514aae8814f4e4674f3bca 2024-11-28T09:21:48,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,591 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,595 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 78850dcd4b514aae8814f4e4674f3bca(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:48,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:48,595 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785708003; duration=0sec 2024-11-28T09:21:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:48,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:48,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
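The omitted run records StoreFileTrackerFactory resolving a tracker implementation for each store touched by these RPC handlers; with nothing configured it falls back to DefaultStoreFileTracker, the class named in every entry. As a minimal sketch only (the configuration key and the plain-Java lookup below are assumptions for illustration, not the factory's actual code), the resolution amounts to:

import java.util.Properties;

// Minimal sketch of the lookup behind the repeated DEBUG entries: resolve a
// store-file-tracker class name from configuration, falling back to the
// default implementation when nothing is set. The key name is an assumption
// for illustration and is not taken from the HBase source.
public class TrackerResolutionSketch {

    static final String TRACKER_IMPL_KEY = "hbase.store.file-tracker.impl"; // assumed key
    static final String DEFAULT_IMPL =
            "org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker";

    static String resolveTrackerClass(Properties conf) {
        // With no explicit setting the default tracker is chosen, which is
        // exactly the class named in each of the omitted DEBUG entries.
        return conf.getProperty(TRACKER_IMPL_KEY, DEFAULT_IMPL);
    }

    public static void main(String[] args) {
        // Prints the default tracker class name when the key is unset.
        System.out.println(resolveTrackerClass(new Properties()));
    }
}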
2024-11-28T09:21:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59
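The entries that follow show the master-driven flush: an RSProcedureDispatcher opens an admin connection to the region server, the server executes a FlushRegionCallable as remote procedure pid=60 and reports success, and the parent procedure pid=59 resumes. A flush like this is typically requested through the client Admin API, roughly as sketched below; whether TestAcidGuarantees issues it exactly this way is not visible in the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Sketch of a client-side flush request. Admin.flush asks the master to flush
// the table's regions; the master then dispatches the per-region-server work
// that appears below as the RS_FLUSH_REGIONS / FlushRegionCallable entries.
public class FlushSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}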
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,671 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:21:48,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,671 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60
2024-11-28T09:21:48,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:48,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a:
2024-11-28T09:21:48,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:48,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60
2024-11-28T09:21:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=60
2024-11-28T09:21:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59
2024-11-28T09:21:48,675 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 154 msec
2024-11-28T09:21:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 164 msec
2024-11-28T09:21:48,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122):
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
[... DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously from 2024-11-28T09:21:48,795 to 09:21:48,815; individual entries elided ...]
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-28T09:21:48,817 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-28T09:21:48,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,817 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,821 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:21:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees 2024-11-28T09:21:48,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,822 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:21:48,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:48,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,823 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=61, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:21:48,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:21:48,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
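[Editor's note, not part of the captured log: the FLUSH operation and FlushTableProcedure entries above are what the standard HBase client Admin API produces when a table flush is requested. A minimal sketch follows, assuming a reachable cluster and the stock org.apache.hadoop.hbase.client API; the class name FlushTableExample is illustrative only and is not taken from the TestAcidGuarantees harness.]

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Hypothetical illustration of the client side of the flush seen in the log above.
public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Asks the master to flush all regions of the table. On the master this
            // appears as a FlushTableProcedure ("Stored pid=..." above), and the client
            // polls until the procedure reports completion ("procId: ... completed").
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```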
[... DEBUG [RpcServer.default.FPBQ.Fifo.handler=0/1/2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker" repeats continuously from 09:21:48,823 through 09:21:48,865 (and continues); individual entries elided ...]
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
2024-11-28T09:21:48,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61
2024-11-28T09:21:48,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a
2024-11-28T09:21:48,924 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:48,924 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:48,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,929 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,934 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112842a768492e644cda8863ed995543b9e0_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:48,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742074_1250 (size=25158) 2024-11-28T09:21:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
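The records above show the CompactingMemStore flush path: once the region's memstore grows past its flush threshold, MemStoreFlusher.0 flushes all three column families (A, B and C) of region 7dfd6fe000bc8ebd2fc5a572e1596e7a and HFileWriterImpl starts writing the resulting files to HDFS. As a rough, hypothetical sketch only (the concrete numbers below are assumptions, not values read from this test), the 512.0 K blocking limit reported a little further down is what a small flush size combined with the default block multiplier produces, since the blocking limit is the flush size times the multiplier:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  // Illustrative only: a configuration whose region blocking limit works out to the
  // 512.0 K seen in the RegionTooBusyException messages below (128 K flush size x 4).
  public static Configuration smallMemStoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush the region memstore at ~128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // reject writes above 4 x 128 K = 512 K
    return conf;
  }
}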
2024-11-28T09:21:48,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:48,975 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:21:48,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-11-28T09:21:48,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
as already flushing 2024-11-28T09:21:48,976 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:48,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785768969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:48,976 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
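The WARN and ERROR records above summarize what the rest of this stretch keeps repeating: HRegion.checkResources rejects incoming mutations with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit, while the master's flush procedure (pid=62) fails with "Unable to complete flush" only because the region is already flushing, so the master re-dispatches it until the in-progress flush completes. Below is a minimal client-side sketch of riding out that back-pressure; it is not how this test's writers are implemented (the log does not show that), the value written is made up, and since the rejection may reach the caller either as RegionTooBusyException or wrapped by the client's own retry machinery, the sketch simply retries on IOException with backoff. The table, row, family and qualifier names are the ones visible in the log.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackpressureAwareWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put); // rejected while the memstore is over its blocking limit
          return;         // accepted once MemStoreFlusher has drained the memstore
        } catch (IOException busy) {
          // Likely RegionTooBusyException (possibly wrapped); back off and retry
          // instead of failing the workload while the flush is still running.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
      throw new IOException("region stayed too busy after 10 attempts");
    }
  }
}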
2024-11-28T09:21:48,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:48,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:48,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:48,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785768972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:48,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785768975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:48,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:48,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785768976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:48,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:48,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:49,000 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:49,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:49,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:49,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785769077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785769080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,083 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785769083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785769084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:49,128 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:49,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,279 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785769279, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,282 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
as already flushing 2024-11-28T09:21:49,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,283 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,286 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785769284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,287 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785769286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785769286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,341 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:49,346 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112842a768492e644cda8863ed995543b9e0_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112842a768492e644cda8863ed995543b9e0_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:49,347 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/18024eb021aa45608affc648ab37e536, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:49,348 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/18024eb021aa45608affc648ab37e536 is 175, key is test_row_0/A:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:49,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742075_1251 (size=74795) 2024-11-28T09:21:49,350 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/18024eb021aa45608affc648ab37e536 2024-11-28T09:21:49,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/2e397e6df0b44a47b12da708fe475965 is 50, key is test_row_0/B:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:49,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742076_1252 (size=12301) 2024-11-28T09:21:49,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:49,436 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,436 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:49,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785769583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,589 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,589 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:49,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785769588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,591 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785769590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,592 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785769590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,743 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,745 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:49,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,745 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:49,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,771 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/2e397e6df0b44a47b12da708fe475965 2024-11-28T09:21:49,793 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/a91129f794ba419d99b5071cf577bda8 is 50, key is test_row_0/C:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:49,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742077_1253 (size=12301) 2024-11-28T09:21:49,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/a91129f794ba419d99b5071cf577bda8 2024-11-28T09:21:49,838 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/18024eb021aa45608affc648ab37e536 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536 2024-11-28T09:21:49,845 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536, entries=400, sequenceid=332, filesize=73.0 K 2024-11-28T09:21:49,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/2e397e6df0b44a47b12da708fe475965 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965 2024-11-28T09:21:49,853 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965, entries=150, sequenceid=332, filesize=12.0 K 2024-11-28T09:21:49,854 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/a91129f794ba419d99b5071cf577bda8 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8 2024-11-28T09:21:49,866 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8, entries=150, sequenceid=332, filesize=12.0 K 2024-11-28T09:21:49,868 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 944ms, sequenceid=332, compaction requested=true 2024-11-28T09:21:49,868 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:49,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:49,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:49,868 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:49,868 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:49,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:49,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:49,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:49,870 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:49,870 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:49,870 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 137987 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:49,870 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:49,870 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:49,870 INFO 
[RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,870 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,870 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/bdce3fef871b422d9f519a19bebdb0a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.7 K 2024-11-28T09:21:49,870 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=134.8 K 2024-11-28T09:21:49,870 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,870 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536] 2024-11-28T09:21:49,871 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bdce3fef871b422d9f519a19bebdb0a4, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:49,871 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed3c405b354748c18fb2878c02d33dde, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:49,871 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c01bd2dedc04d5eab68bdd9868e7aad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732785706712 2024-11-28T09:21:49,872 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b40fa953c50044da961651df39deecb9, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732785706712 2024-11-28T09:21:49,872 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 18024eb021aa45608affc648ab37e536, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708874 2024-11-28T09:21:49,872 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e397e6df0b44a47b12da708fe475965, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708910 2024-11-28T09:21:49,884 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#213 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:49,884 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:49,885 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/16062192dff54216b2674e591b00164f is 50, key is test_row_0/B:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:49,895 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:49,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:49,895 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:49,898 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,899 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:49,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:49,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:49,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:49,899 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,899 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:49,900 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128cc5b90b4f4e045c1aa308c20ef5a91a5_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:49,904 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128cc5b90b4f4e045c1aa308c20ef5a91a5_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:49,904 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cc5b90b4f4e045c1aa308c20ef5a91a5_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:49,927 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785769924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:49,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:49,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e2105a96d244d25b796a5a67cf8b06f_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785708974/Put/seqid=0 2024-11-28T09:21:49,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742078_1254 (size=13085) 2024-11-28T09:21:49,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742079_1255 (size=4469) 2024-11-28T09:21:49,984 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#214 average throughput is 0.24 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:49,985 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/f233c1d262684b249b4e44386db652e7 is 175, key is test_row_0/A:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:49,987 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/16062192dff54216b2674e591b00164f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/16062192dff54216b2674e591b00164f 2024-11-28T09:21:49,995 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 16062192dff54216b2674e591b00164f(size=12.8 K), total size for store is 12.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:49,995 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:49,996 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785709868; duration=0sec 2024-11-28T09:21:49,996 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:49,996 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:49,996 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:49,998 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:49,998 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:49,998 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:49,998 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/78850dcd4b514aae8814f4e4674f3bca, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.7 K 2024-11-28T09:21:49,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 78850dcd4b514aae8814f4e4674f3bca, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1732785705761 2024-11-28T09:21:49,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f0d218e4b44c4a7cbc6a970821829e46, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=317, earliestPutTs=1732785706712 2024-11-28T09:21:50,000 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a91129f794ba419d99b5071cf577bda8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708910 2024-11-28T09:21:50,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742080_1256 (size=14994) 2024-11-28T09:21:50,011 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#216 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:50,011 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/042828c3f16b49d887fc9594d2e6c606 is 50, key is test_row_0/C:col10/1732785708910/Put/seqid=0 2024-11-28T09:21:50,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742081_1257 (size=32039) 2024-11-28T09:21:50,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742082_1258 (size=13085) 2024-11-28T09:21:50,030 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785770028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,051 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,052 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,052 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,090 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785770088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785770094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,098 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785770096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785770096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,206 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,206 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,234 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785770233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,359 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
as already flushing 2024-11-28T09:21:50,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,410 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:50,414 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285e2105a96d244d25b796a5a67cf8b06f_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e2105a96d244d25b796a5a67cf8b06f_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:50,416 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/969454b694574891bc3d93e14fe8c959, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:50,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/969454b694574891bc3d93e14fe8c959 is 175, key is test_row_0/A:col10/1732785708974/Put/seqid=0 2024-11-28T09:21:50,422 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/f233c1d262684b249b4e44386db652e7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7 2024-11-28T09:21:50,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742083_1259 (size=39949) 2024-11-28T09:21:50,424 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=358, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/969454b694574891bc3d93e14fe8c959 2024-11-28T09:21:50,424 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/042828c3f16b49d887fc9594d2e6c606 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/042828c3f16b49d887fc9594d2e6c606 2024-11-28T09:21:50,430 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into f233c1d262684b249b4e44386db652e7(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:50,430 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:50,431 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785709868; duration=0sec 2024-11-28T09:21:50,431 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:50,431 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:50,433 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 042828c3f16b49d887fc9594d2e6c606(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:50,433 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:50,433 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785709869; duration=0sec 2024-11-28T09:21:50,433 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:50,433 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:50,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5838d2b3054785ae4b807d7bcef092 is 50, key is test_row_0/B:col10/1732785708974/Put/seqid=0 2024-11-28T09:21:50,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742084_1260 (size=12301) 2024-11-28T09:21:50,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5838d2b3054785ae4b807d7bcef092 2024-11-28T09:21:50,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8483170b57fb42738f04d88449b0038e is 50, key is test_row_0/C:col10/1732785708974/Put/seqid=0 2024-11-28T09:21:50,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742085_1261 (size=12301) 2024-11-28T09:21:50,486 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=358 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8483170b57fb42738f04d88449b0038e 2024-11-28T09:21:50,495 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/969454b694574891bc3d93e14fe8c959 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959 2024-11-28T09:21:50,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959, entries=200, sequenceid=358, filesize=39.0 K 2024-11-28T09:21:50,500 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5838d2b3054785ae4b807d7bcef092 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092 2024-11-28T09:21:50,505 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092, entries=150, sequenceid=358, filesize=12.0 K 2024-11-28T09:21:50,506 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8483170b57fb42738f04d88449b0038e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e 2024-11-28T09:21:50,513 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,514 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e, entries=150, sequenceid=358, filesize=12.0 K 2024-11-28T09:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,514 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,515 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,516 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 621ms, sequenceid=358, compaction requested=false 2024-11-28T09:21:50,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:50,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:50,540 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:50,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:50,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:50,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:50,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:50,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:50,541 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:50,550 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284f0a22667f544503994be60753f3ed88_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:50,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742086_1262 (size=12454) 2024-11-28T09:21:50,558 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:50,563 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411284f0a22667f544503994be60753f3ed88_7dfd6fe000bc8ebd2fc5a572e1596e7a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284f0a22667f544503994be60753f3ed88_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:50,568 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/76afa567f5ff49a089d722f8fe87c8ef, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:50,568 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/76afa567f5ff49a089d722f8fe87c8ef is 175, key is test_row_0/A:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:50,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785770596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742087_1263 (size=31255) 2024-11-28T09:21:50,620 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=372, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/76afa567f5ff49a089d722f8fe87c8ef 2024-11-28T09:21:50,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5de9ea9e2244f79775b985ac2418f9 is 50, key is test_row_0/B:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:50,667 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,668 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742088_1264 (size=12301) 2024-11-28T09:21:50,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5de9ea9e2244f79775b985ac2418f9 2024-11-28T09:21:50,699 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6cdef821b67f460aaeed55bdb199103a is 50, key is test_row_0/C:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:50,700 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785770699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742089_1265 (size=12301) 2024-11-28T09:21:50,820 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,821 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
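The RegionTooBusyException warnings above and below show puts on TestAcidGuarantees being rejected while region 7dfd6fe000bc8ebd2fc5a572e1596e7a is over its blocking memstore size (512.0 K here; in stock HBase that bound is roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, so the test presumably runs with a very small flush size). As a minimal sketch, assuming standard HBase 2.x client classes and the table/row/family names taken from the log, a writer could back off and retry when this exception surfaces; the class name and retry policy are illustrative only, not the test's actual code, and the stock client normally retries this exception internally before it ever reaches the caller.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffPutExample {                  // hypothetical class name
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                 // start with a short pause
                for (int attempt = 1; attempt <= 5; attempt++) {
                    try {
                        table.put(put);               // server may reject with RegionTooBusyException
                        break;                        // write accepted
                    } catch (RegionTooBusyException e) {
                        // Region is over its blocking memstore size; give the flush time to drain it.
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;               // exponential backoff
                    }
                }
            }
        }
    }

Raising the flush size or block multiplier (or simply writing more slowly) has the same effect as the backoff: it gives MemStoreFlusher.0 time to bring the memstore back under the limit.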
2024-11-28T09:21:50,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:50,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785770901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:50,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:50,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:50,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:50,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:50,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
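The repeating pid=62 blocks are the master's flush procedure being re-dispatched to 363d8d38a970,33819: each attempt finds the region already flushing ("NOT flushing ... as already flushing"), the callable fails with "Unable to complete flush", the master records "Remote procedure failed, pid=62", and the dispatcher tries again until the in-progress MemStoreFlusher flush completes. The flush itself was presumably requested through the Admin API; a minimal sketch of that call path, using standard HBase 2.x client classes (connection details and class name assumed), looks like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {                  // hypothetical class name
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Submits a flush procedure on the master (the pid=61/62 style procedures above);
                // each region flush is then executed remotely by FlushRegionCallable on the region server.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }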
2024-11-28T09:21:50,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:50,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785771094, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,098 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785771097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,103 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785771102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,104 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785771104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,127 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=372 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6cdef821b67f460aaeed55bdb199103a 2024-11-28T09:21:51,127 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,129 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/76afa567f5ff49a089d722f8fe87c8ef as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef 2024-11-28T09:21:51,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef, entries=150, sequenceid=372, filesize=30.5 K 2024-11-28T09:21:51,144 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/5a5de9ea9e2244f79775b985ac2418f9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9 2024-11-28T09:21:51,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9, entries=150, sequenceid=372, filesize=12.0 K 2024-11-28T09:21:51,158 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6cdef821b67f460aaeed55bdb199103a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a 2024-11-28T09:21:51,163 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a, entries=150, sequenceid=372, filesize=12.0 K 2024-11-28T09:21:51,164 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 625ms, sequenceid=372, compaction requested=true 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:51,164 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:51,164 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:51,164 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:21:51,164 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:51,166 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103243 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:51,166 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:51,166 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
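The CompactSplit and SortedCompactionPolicy entries above show each store being queued for compaction as soon as three store files are eligible ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking"). Those counts line up with the standard store-level settings; the sketch below reads them from a client-side configuration, with the fallback values being stock HBase defaults rather than values confirmed by this log, and the class name is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningExample {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Minimum number of eligible files before a minor compaction is selected
            // (the "3 eligible" in the log matches the stock default of 3).
            int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);
            // Store file count at which further flushes block writers
            // (the "16 blocking" in the log matches the stock default of 16).
            int blockingFiles = conf.getInt("hbase.hstore.blockingStoreFiles", 16);
            // Upper bound on the number of files compacted in one pass.
            int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);
            System.out.printf("min=%d, max=%d, blocking=%d%n", minFiles, maxFiles, blockingFiles);
        }
    }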
2024-11-28T09:21:51,166 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=100.8 K 2024-11-28T09:21:51,166 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,166 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef] 2024-11-28T09:21:51,173 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:51,173 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:51,173 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f233c1d262684b249b4e44386db652e7, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708910 2024-11-28T09:21:51,174 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
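The HMobStore and DefaultMobStoreCompactor entries in this stretch indicate that column family A is MOB-enabled: each flush writes an extra file under mobdir/ (renamed into mobdir/data/ earlier above), and the compactor's MOB writer is aborted a few entries below "because there are no MOB cells". A minimal, hypothetical sketch of declaring such a family with the standard descriptor-builder API follows; the threshold value is illustrative and not taken from the test.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobFamilyExample {                   // hypothetical class name
        public static void main(String[] args) {
            // Values larger than the MOB threshold are stored as separate MOB files under mobdir/,
            // which is why the flushes above also write d41d8cd98f00... files there.
            ColumnFamilyDescriptor mobFamily = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes("A"))
                    .setMobEnabled(true)
                    .setMobThreshold(100L)            // illustrative threshold in bytes
                    .build();
            TableDescriptor table = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                    .setColumnFamily(mobFamily)
                    .build();
            System.out.println(table);
        }
    }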
2024-11-28T09:21:51,174 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/16062192dff54216b2674e591b00164f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.8 K 2024-11-28T09:21:51,174 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 969454b694574891bc3d93e14fe8c959, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732785708962 2024-11-28T09:21:51,174 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16062192dff54216b2674e591b00164f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708910 2024-11-28T09:21:51,175 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 76afa567f5ff49a089d722f8fe87c8ef, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:51,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a5838d2b3054785ae4b807d7bcef092, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732785708962 2024-11-28T09:21:51,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a5de9ea9e2244f79775b985ac2418f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:51,189 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:51,192 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#223 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:51,193 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/4dd01a6ceb0c4ea6b2cbf0bf27996431 is 50, key is test_row_0/B:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:51,198 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128386919e207d94cf6a0f2837d56e7ff6b_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:51,200 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128386919e207d94cf6a0f2837d56e7ff6b_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:51,200 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128386919e207d94cf6a0f2837d56e7ff6b_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:51,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742090_1266 (size=13187) 2024-11-28T09:21:51,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:21:51,209 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:51,210 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:51,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:51,211 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/4dd01a6ceb0c4ea6b2cbf0bf27996431 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4dd01a6ceb0c4ea6b2cbf0bf27996431 2024-11-28T09:21:51,221 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 4dd01a6ceb0c4ea6b2cbf0bf27996431(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:51,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:51,221 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785711164; duration=0sec 2024-11-28T09:21:51,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:51,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:51,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:51,231 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:51,231 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:51,231 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:51,231 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/042828c3f16b49d887fc9594d2e6c606, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=36.8 K 2024-11-28T09:21:51,232 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 042828c3f16b49d887fc9594d2e6c606, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785708910 2024-11-28T09:21:51,233 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8483170b57fb42738f04d88449b0038e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=358, earliestPutTs=1732785708962 2024-11-28T09:21:51,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742091_1267 (size=4469) 2024-11-28T09:21:51,234 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6cdef821b67f460aaeed55bdb199103a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:51,234 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#222 average throughput is 0.54 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:51,235 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/94d1328c5cc649939b7c3e167dca2356 is 175, key is test_row_0/A:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:51,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286b8e82e4b56448639640867b7d89915b_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785710591/Put/seqid=0 2024-11-28T09:21:51,251 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#225 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:51,251 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/ac0aade824504144ac45ed8cbe06e015 is 50, key is test_row_0/C:col10/1732785709913/Put/seqid=0 2024-11-28T09:21:51,253 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785771250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742092_1268 (size=32141) 2024-11-28T09:21:51,279 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/94d1328c5cc649939b7c3e167dca2356 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356 2024-11-28T09:21:51,281 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,282 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:51,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,285 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 94d1328c5cc649939b7c3e167dca2356(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:51,285 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:51,285 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785711164; duration=0sec 2024-11-28T09:21:51,285 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:51,285 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:51,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742093_1269 (size=14994) 2024-11-28T09:21:51,291 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:51,295 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286b8e82e4b56448639640867b7d89915b_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286b8e82e4b56448639640867b7d89915b_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:51,297 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e674c21d774c179b78f65a7e8ef72b, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:51,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e674c21d774c179b78f65a7e8ef72b is 175, key is test_row_0/A:col10/1732785710591/Put/seqid=0 2024-11-28T09:21:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742094_1270 (size=13187) 2024-11-28T09:21:51,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742095_1271 (size=39949) 2024-11-28T09:21:51,308 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=398, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e674c21d774c179b78f65a7e8ef72b 2024-11-28T09:21:51,311 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/ac0aade824504144ac45ed8cbe06e015 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ac0aade824504144ac45ed8cbe06e015 2024-11-28T09:21:51,316 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into ac0aade824504144ac45ed8cbe06e015(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:21:51,316 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:51,316 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785711164; duration=0sec 2024-11-28T09:21:51,316 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:51,316 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:51,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6a3f3925c224b6582f26aaba301d01b is 50, key is test_row_0/B:col10/1732785710591/Put/seqid=0 2024-11-28T09:21:51,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742096_1272 (size=12301) 2024-11-28T09:21:51,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785771355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,436 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,437 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,437 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:51,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785771558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,589 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,589 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:51,590 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,741 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,742 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,742 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,745 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6a3f3925c224b6582f26aaba301d01b 2024-11-28T09:21:51,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6e2d98617f524d40af318ff877cf7810 is 50, key is test_row_0/C:col10/1732785710591/Put/seqid=0 2024-11-28T09:21:51,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742097_1273 (size=12301) 2024-11-28T09:21:51,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:51,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785771862, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,894 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:51,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:51,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:51,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:51,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:51,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:51,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:52,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:52,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62 2024-11-28T09:21:52,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:52,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. as already flushing 2024-11-28T09:21:52,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:52,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] handler.RSProcedureHandler(58): pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:52,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=62 java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:21:52,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=62 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:21:52,160 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6e2d98617f524d40af318ff877cf7810
2024-11-28T09:21:52,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e674c21d774c179b78f65a7e8ef72b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b
2024-11-28T09:21:52,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b, entries=200, sequenceid=398, filesize=39.0 K
2024-11-28T09:21:52,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6a3f3925c224b6582f26aaba301d01b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b
2024-11-28T09:21:52,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:52,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b, entries=150, sequenceid=398, filesize=12.0 K
2024-11-28T09:21:52,175 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/6e2d98617f524d40af318ff877cf7810 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810
2024-11-28T09:21:52,180 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810, entries=150, sequenceid=398, filesize=12.0 K
2024-11-28T09:21:52,181 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 972ms, sequenceid=398, compaction requested=false
2024-11-28T09:21:52,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a:
2024-11-28T09:21:52,201 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:21:52,202 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=62
2024-11-28T09:21:52,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.
2024-11-28T09:21:52,202 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB
2024-11-28T09:21:52,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A
2024-11-28T09:21:52,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:52,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B
2024-11-28T09:21:52,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:21:52,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C
2024-11-28T09:21:52,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282bbd88f1533a4249ac020ba2f1547def_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785711235/Put/seqid=0 2024-11-28T09:21:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:21:52,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742098_1274 (size=9914)
2024-11-28T09:21:52,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:21:52,288 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282bbd88f1533a4249ac020ba2f1547def_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282bbd88f1533a4249ac020ba2f1547def_7dfd6fe000bc8ebd2fc5a572e1596e7a
2024-11-28T09:21:52,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e621f530c83b4be89f66fc094f8d14bd, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a]
2024-11-28T09:21:52,290 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e621f530c83b4be89f66fc094f8d14bd is 175, key is test_row_0/A:col10/1732785711235/Put/seqid=0
2024-11-28T09:21:52,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742099_1275 (size=22561)
2024-11-28T09:21:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:52,403 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
as already flushing 2024-11-28T09:21:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:52,557 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:52,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785772556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:52,662 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:52,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785772660, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:52,720 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=411, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e621f530c83b4be89f66fc094f8d14bd 2024-11-28T09:21:52,732 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ce95049613884950be4de942519484ce is 50, key is test_row_0/B:col10/1732785711235/Put/seqid=0 2024-11-28T09:21:52,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742100_1276 (size=9857) 2024-11-28T09:21:52,866 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:52,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785772866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:52,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:53,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57184 deadline: 1732785773103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,104 DEBUG [Thread-765 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4135 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:21:53,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57148 deadline: 1732785773112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,113 DEBUG [Thread-771 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4137 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:21:53,114 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57180 deadline: 1732785773114, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,115 DEBUG [Thread-773 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4141 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:21:53,119 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57156 deadline: 1732785773119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,120 DEBUG [Thread-769 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4147 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:21:53,145 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ce95049613884950be4de942519484ce 2024-11-28T09:21:53,153 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8ea65d9b1e1c42b6b6e41142e067a436 is 50, key is test_row_0/C:col10/1732785711235/Put/seqid=0 2024-11-28T09:21:53,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742101_1277 (size=9857) 2024-11-28T09:21:53,174 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=411 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8ea65d9b1e1c42b6b6e41142e067a436 2024-11-28T09:21:53,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785773174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,180 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e621f530c83b4be89f66fc094f8d14bd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd 2024-11-28T09:21:53,184 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd, entries=100, sequenceid=411, filesize=22.0 K 2024-11-28T09:21:53,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/ce95049613884950be4de942519484ce as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce 2024-11-28T09:21:53,188 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce, entries=100, sequenceid=411, filesize=9.6 K 2024-11-28T09:21:53,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/8ea65d9b1e1c42b6b6e41142e067a436 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436 2024-11-28T09:21:53,194 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436, entries=100, sequenceid=411, filesize=9.6 K 2024-11-28T09:21:53,195 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 993ms, sequenceid=411, compaction requested=true 2024-11-28T09:21:53,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:53,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:53,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=62}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=62 2024-11-28T09:21:53,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=62 2024-11-28T09:21:53,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-28T09:21:53,198 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 4.3730 sec 2024-11-28T09:21:53,200 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=61, table=TestAcidGuarantees in 4.3770 sec 2024-11-28T09:21:53,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:53,679 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T09:21:53,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:53,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:53,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:53,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128857740b716de4a199afd661f50041335_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785712551/Put/seqid=0 
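
The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") are the region server deliberately rejecting puts while flushes catch up: HRegion.checkResources blocks a write once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The values used by this test run are not visible in this excerpt; the Java sketch below only illustrates how a 512.0 K blocking limit could arise, assuming a hypothetical 128 K flush size and the default multiplier of 4.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-style values; the actual settings of this run are not shown in the log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush a memstore at 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4x the flush size
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 131072 * 4 = 524288 bytes = 512.0 K, the limit reported in the exceptions above
    System.out.println("Puts fail with RegionTooBusyException above " + blockingLimit + " bytes");
  }
}

Blocking at a multiple of the flush size is a back-pressure mechanism: it gives the flusher, and the compactions queued after it, time to drain the memstore instead of letting hot rows grow it without bound.
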
2024-11-28T09:21:53,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785773695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742102_1278 (size=12454) 2024-11-28T09:21:53,700 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:53,704 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128857740b716de4a199afd661f50041335_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128857740b716de4a199afd661f50041335_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:53,705 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e36a204f924b9684d5bab853497a1e, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:53,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e36a204f924b9684d5bab853497a1e is 175, key is 
test_row_0/A:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742103_1279 (size=31255) 2024-11-28T09:21:53,717 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=438, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e36a204f924b9684d5bab853497a1e 2024-11-28T09:21:53,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6f2790429894ecd8a3b44401c56ebb7 is 50, key is test_row_0/B:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742104_1280 (size=12301) 2024-11-28T09:21:53,736 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6f2790429894ecd8a3b44401c56ebb7 2024-11-28T09:21:53,744 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/459ac525ab374d0eb1cd161c2a74d71b is 50, key is test_row_0/C:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742105_1281 (size=12301) 2024-11-28T09:21:53,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=438 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/459ac525ab374d0eb1cd161c2a74d71b 2024-11-28T09:21:53,765 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/56e36a204f924b9684d5bab853497a1e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e 2024-11-28T09:21:53,770 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e, entries=150, sequenceid=438, filesize=30.5 K 2024-11-28T09:21:53,771 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/c6f2790429894ecd8a3b44401c56ebb7 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7 2024-11-28T09:21:53,776 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7, entries=150, sequenceid=438, filesize=12.0 K 2024-11-28T09:21:53,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/459ac525ab374d0eb1cd161c2a74d71b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b 2024-11-28T09:21:53,782 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b, entries=150, sequenceid=438, filesize=12.0 K 2024-11-28T09:21:53,784 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 105ms, sequenceid=438, compaction requested=true 2024-11-28T09:21:53,784 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:53,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:53,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:53,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:53,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:53,784 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:53,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:53,785 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:53,785 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:53,787 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction 
algorithm has selected 4 files of size 125906 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:53,787 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:53,787 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:53,787 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=123.0 K 2024-11-28T09:21:53,787 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:53,787 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e] 2024-11-28T09:21:53,787 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47646 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:53,787 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:53,787 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:53,788 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4dd01a6ceb0c4ea6b2cbf0bf27996431, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=46.5 K 2024-11-28T09:21:53,788 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94d1328c5cc649939b7c3e167dca2356, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:53,788 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56e674c21d774c179b78f65a7e8ef72b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732785710587 2024-11-28T09:21:53,788 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dd01a6ceb0c4ea6b2cbf0bf27996431, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:53,789 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e621f530c83b4be89f66fc094f8d14bd, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732785711235 2024-11-28T09:21:53,789 
DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c6a3f3925c224b6582f26aaba301d01b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732785710591 2024-11-28T09:21:53,789 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56e36a204f924b9684d5bab853497a1e, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:53,789 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ce95049613884950be4de942519484ce, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732785711235 2024-11-28T09:21:53,790 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c6f2790429894ecd8a3b44401c56ebb7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:53,802 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:53,803 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#234 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:53,804 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/7f3c164d1d5f49f5a520762a8369063a is 50, key is test_row_0/B:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,804 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128bd1dec0a35dc4c728902a07c55ed161b_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:53,806 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128bd1dec0a35dc4c728902a07c55ed161b_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:53,807 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128bd1dec0a35dc4c728902a07c55ed161b_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:53,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:53,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:21:53,809 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:53,809 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:53,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742106_1282 (size=13323) 2024-11-28T09:21:53,822 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/7f3c164d1d5f49f5a520762a8369063a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7f3c164d1d5f49f5a520762a8369063a 2024-11-28T09:21:53,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112818f4f0bc95b841aba43f7d1bea75fbe9_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785713806/Put/seqid=0 2024-11-28T09:21:53,830 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 7f3c164d1d5f49f5a520762a8369063a(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
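
The compaction that just completed was queued automatically by CompactSplit once the flush left family B with four store files; ExploringCompactionPolicy selected all four eligible files and rewrote them into the single 13.0 K file 7f3c164d1d5f49f5a520762a8369063a. For reference only (this is not what the test does), the same flush and compaction operations can also be requested explicitly through the HBase Admin API, as in this minimal sketch:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);   // flush memstores to new store files, comparable to the FlushTableProcedure above
      admin.compact(table); // request a minor compaction of eligible store files per column family
      // admin.majorCompact(table); // or rewrite every store file in a single pass
    }
  }
}
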
2024-11-28T09:21:53,830 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:53,830 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=12, startTime=1732785713784; duration=0sec 2024-11-28T09:21:53,830 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:53,830 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:53,830 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:21:53,832 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47646 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:21:53,832 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:53,832 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:53,832 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ac0aade824504144ac45ed8cbe06e015, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=46.5 K 2024-11-28T09:21:53,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742107_1283 (size=4469) 2024-11-28T09:21:53,835 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ac0aade824504144ac45ed8cbe06e015, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=372, earliestPutTs=1732785709913 2024-11-28T09:21:53,835 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e2d98617f524d40af318ff877cf7810, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1732785710591 2024-11-28T09:21:53,836 INFO 
[RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#235 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:53,836 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ea65d9b1e1c42b6b6e41142e067a436, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=411, earliestPutTs=1732785711235 2024-11-28T09:21:53,836 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/cfcc87671ddf44cd8461e29e5389ad76 is 175, key is test_row_0/A:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,837 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 459ac525ab374d0eb1cd161c2a74d71b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:53,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742109_1285 (size=32277) 2024-11-28T09:21:53,851 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#237 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:53,852 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/ef6500d2efac4a588c8e4e6f102a38c4 is 50, key is test_row_0/C:col10/1732785712551/Put/seqid=0 2024-11-28T09:21:53,854 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/cfcc87671ddf44cd8461e29e5389ad76 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76 2024-11-28T09:21:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742108_1284 (size=17534) 2024-11-28T09:21:53,859 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into cfcc87671ddf44cd8461e29e5389ad76(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
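
Throughout this excerpt the writer threads (AcidGuaranteesTestTool$AtomicityWriter) simply keep calling Table.put; RpcRetryingCallerImpl absorbs each RegionTooBusyException and retries with backoff, which is what the "tries=6, retries=16, started=... ms ago" messages record. A minimal client-side sketch of that pattern follows; the configuration values are illustrative and not taken from the test setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // client retry budget; the log above reports "retries=16"
    conf.setLong("hbase.client.pause", 100);        // base backoff between retries, in ms (hypothetical)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // retried internally with backoff; fails with an IOException once retries are exhausted
    }
  }
}
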
2024-11-28T09:21:53,859 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:53,859 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=12, startTime=1732785713784; duration=0sec 2024-11-28T09:21:53,859 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:53,859 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:53,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742110_1286 (size=13323) 2024-11-28T09:21:53,886 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785773886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:53,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:53,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785773987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:54,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:54,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785774189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:54,254 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:54,258 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112818f4f0bc95b841aba43f7d1bea75fbe9_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112818f4f0bc95b841aba43f7d1bea75fbe9_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:54,259 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3e338e93ad3e482b8dcf863378997c9c, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:54,260 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3e338e93ad3e482b8dcf863378997c9c is 175, key is test_row_0/A:col10/1732785713806/Put/seqid=0 2024-11-28T09:21:54,268 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/ef6500d2efac4a588c8e4e6f102a38c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ef6500d2efac4a588c8e4e6f102a38c4 2024-11-28T09:21:54,274 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into ef6500d2efac4a588c8e4e6f102a38c4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
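The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region refusing new writes until the in-flight flushes catch up. That blocking threshold is normally the per-region memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; a 512 K limit suggests this test runs with a flush size far below the shipped 128 MB default. A hedged sketch of that arithmetic follows; the 128 KB fallback is an assumption chosen to match this log, not an HBase default.

```java
// Hedged sketch: compute the memstore blocking limit from configuration. Against stock
// hbase-default.xml this prints 128 MB * 4; the test presumably overrides the flush size
// so that 128 KB * 4 = 512 KB, matching the "Over memstore limit=512.0 K" warnings.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // The flush-size fallback here is an illustrative assumption; 4 is the usual multiplier default.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block once a region's memstore exceeds "
        + (flushSize * multiplier) + " bytes");
  }
}
```

Callers that hit this exception simply need to back off and retry; the HBase client does this internally, which is why the test's writer threads keep making progress despite the warnings.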
2024-11-28T09:21:54,274 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:54,274 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=12, startTime=1732785713784; duration=0sec 2024-11-28T09:21:54,274 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:54,274 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:54,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742111_1287 (size=48639) 2024-11-28T09:21:54,291 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=450, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3e338e93ad3e482b8dcf863378997c9c 2024-11-28T09:21:54,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/7d38445a760b41f19a428a679651cd73 is 50, key is test_row_0/B:col10/1732785713806/Put/seqid=0 2024-11-28T09:21:54,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742112_1288 (size=12301) 2024-11-28T09:21:54,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/7d38445a760b41f19a428a679651cd73 2024-11-28T09:21:54,331 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/53a4c0336dd84b5c939b1ad23d002275 is 50, key is test_row_0/C:col10/1732785713806/Put/seqid=0 2024-11-28T09:21:54,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742113_1289 (size=12301) 2024-11-28T09:21:54,494 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:21:54,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:57190 deadline: 1732785774493, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 2024-11-28T09:21:54,557 DEBUG [Thread-776 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x367f47f7 to 127.0.0.1:53251 2024-11-28T09:21:54,557 DEBUG [Thread-776 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:54,559 DEBUG [Thread-778 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:53251 2024-11-28T09:21:54,559 DEBUG [Thread-778 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:54,561 DEBUG [Thread-782 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:53251 2024-11-28T09:21:54,561 DEBUG [Thread-782 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:54,562 DEBUG [Thread-780 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:53251 2024-11-28T09:21:54,562 DEBUG [Thread-780 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:54,747 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=450 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/53a4c0336dd84b5c939b1ad23d002275 2024-11-28T09:21:54,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/3e338e93ad3e482b8dcf863378997c9c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c 2024-11-28T09:21:54,754 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c, entries=250, sequenceid=450, filesize=47.5 K 2024-11-28T09:21:54,755 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/7d38445a760b41f19a428a679651cd73 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73 2024-11-28T09:21:54,758 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73, entries=150, sequenceid=450, filesize=12.0 K 2024-11-28T09:21:54,759 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/53a4c0336dd84b5c939b1ad23d002275 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275 2024-11-28T09:21:54,762 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275, entries=150, sequenceid=450, filesize=12.0 K 2024-11-28T09:21:54,763 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 955ms, sequenceid=450, compaction requested=false 2024-11-28T09:21:54,763 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:54,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:54,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:21:54,999 DEBUG [Thread-767 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2931c73e to 127.0.0.1:53251 2024-11-28T09:21:54,999 DEBUG [Thread-767 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:55,004 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ed9d22d036ee4b24a358acf8edebc34e_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:55,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742114_1290 (size=12454) 2024-11-28T09:21:55,409 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:55,412 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ed9d22d036ee4b24a358acf8edebc34e_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed9d22d036ee4b24a358acf8edebc34e_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:55,413 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7b4c7a25311f49578fe572070b2ed360, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:55,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7b4c7a25311f49578fe572070b2ed360 is 175, key is test_row_0/A:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:55,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742115_1291 (size=31255) 2024-11-28T09:21:55,817 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=478, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7b4c7a25311f49578fe572070b2ed360 2024-11-28T09:21:55,824 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d76dfc98567f47c7b0ca2e99b5b6549b is 50, key is test_row_0/B:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:55,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742116_1292 (size=12301) 2024-11-28T09:21:56,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d76dfc98567f47c7b0ca2e99b5b6549b 2024-11-28T09:21:56,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1c275cc31877420dbfbb3d87f2c65ac2 is 50, key is test_row_0/C:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:56,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742117_1293 (size=12301) 2024-11-28T09:21:56,642 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=478 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1c275cc31877420dbfbb3d87f2c65ac2 2024-11-28T09:21:56,646 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/7b4c7a25311f49578fe572070b2ed360 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360 2024-11-28T09:21:56,649 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360, entries=150, sequenceid=478, filesize=30.5 K 2024-11-28T09:21:56,650 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d76dfc98567f47c7b0ca2e99b5b6549b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b 2024-11-28T09:21:56,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b, entries=150, sequenceid=478, filesize=12.0 K 2024-11-28T09:21:56,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/1c275cc31877420dbfbb3d87f2c65ac2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2 2024-11-28T09:21:56,657 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2, entries=150, sequenceid=478, filesize=12.0 K 2024-11-28T09:21:56,658 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1660ms, sequenceid=478, compaction requested=true 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status 
journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:56,658 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:56,658 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7dfd6fe000bc8ebd2fc5a572e1596e7a:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:21:56,659 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:56,659 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:56,659 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112171 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:56,659 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:56,659 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/A is initiating minor compaction (all files) 2024-11-28T09:21:56,659 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/B is initiating minor compaction (all files) 2024-11-28T09:21:56,659 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/A in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:56,659 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/B in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
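The flush that just completed (sequenceid=478) appears to be the admin-requested one, acknowledged further down by "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed", and the follow-up compaction requests for A, B and C are queued automatically once the new files land. For reference, here is a minimal sketch of driving such a flush, and optionally a compaction, from client code; only the table name is taken from the log, everything else is generic boilerplate.

```java
// Hedged sketch, not part of the test: request a table flush and a compaction via the Admin API.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);          // asks the cluster to flush the table's memstores
      admin.majorCompact(table);   // queues a major compaction; it runs asynchronously
    }
  }
}
```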
2024-11-28T09:21:56,660 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=109.5 K 2024-11-28T09:21:56,660 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:56,660 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360] 2024-11-28T09:21:56,660 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7f3c164d1d5f49f5a520762a8369063a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=37.0 K 2024-11-28T09:21:56,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7f3c164d1d5f49f5a520762a8369063a, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:56,660 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cfcc87671ddf44cd8461e29e5389ad76, keycount=150, bloomtype=ROW, size=31.5 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:56,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
7d38445a760b41f19a428a679651cd73, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732785713694 2024-11-28T09:21:56,660 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e338e93ad3e482b8dcf863378997c9c, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732785713690 2024-11-28T09:21:56,661 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d76dfc98567f47c7b0ca2e99b5b6549b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1732785713880 2024-11-28T09:21:56,661 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7b4c7a25311f49578fe572070b2ed360, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1732785713880 2024-11-28T09:21:56,668 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:56,669 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411283da221df4819447ba50df473cb2ee578_7dfd6fe000bc8ebd2fc5a572e1596e7a store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:56,670 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#B#compaction#243 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:56,671 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d581517048a64871acbff47236e76b2a is 50, key is test_row_0/B:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:56,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742118_1294 (size=13425) 2024-11-28T09:21:56,687 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/d581517048a64871acbff47236e76b2a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d581517048a64871acbff47236e76b2a 2024-11-28T09:21:56,692 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/B of 7dfd6fe000bc8ebd2fc5a572e1596e7a into d581517048a64871acbff47236e76b2a(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:21:56,692 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:56,692 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/B, priority=13, startTime=1732785716658; duration=0sec 2024-11-28T09:21:56,692 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:21:56,692 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:B 2024-11-28T09:21:56,693 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:21:56,693 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:21:56,694 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 7dfd6fe000bc8ebd2fc5a572e1596e7a/C is initiating minor compaction (all files) 2024-11-28T09:21:56,694 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 7dfd6fe000bc8ebd2fc5a572e1596e7a/C in TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:56,694 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ef6500d2efac4a588c8e4e6f102a38c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp, totalSize=37.0 K 2024-11-28T09:21:56,694 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ef6500d2efac4a588c8e4e6f102a38c4, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=438, earliestPutTs=1732785712533 2024-11-28T09:21:56,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 53a4c0336dd84b5c939b1ad23d002275, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=450, earliestPutTs=1732785713694 2024-11-28T09:21:56,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c275cc31877420dbfbb3d87f2c65ac2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=478, earliestPutTs=1732785713880 2024-11-28T09:21:56,695 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort 
size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411283da221df4819447ba50df473cb2ee578_7dfd6fe000bc8ebd2fc5a572e1596e7a, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:56,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283da221df4819447ba50df473cb2ee578_7dfd6fe000bc8ebd2fc5a572e1596e7a because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742119_1295 (size=4469) 2024-11-28T09:21:56,702 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#A#compaction#244 average throughput is 0.72 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:56,703 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/184bb218d7b94e95aa47b6d96338d514 is 175, key is test_row_0/A:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:56,714 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7dfd6fe000bc8ebd2fc5a572e1596e7a#C#compaction#245 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:21:56,714 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/9f0c8a5da1764ee89b23a2661cc58fe2 is 50, key is test_row_0/C:col10/1732785714997/Put/seqid=0 2024-11-28T09:21:56,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742120_1296 (size=32379) 2024-11-28T09:21:56,721 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/184bb218d7b94e95aa47b6d96338d514 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/184bb218d7b94e95aa47b6d96338d514 2024-11-28T09:21:56,725 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/A of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 184bb218d7b94e95aa47b6d96338d514(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
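The PressureAwareThroughputController entries above report how fast each compaction wrote and how long it had to sleep against the 50.00 MB/second limit (here: no sleeping, since the files are tiny). The real controller also raises or lowers that limit with compaction pressure; the sketch below is only the core sleep-based throttling idea, not HBase's implementation.

```java
// Simplified throughput throttle: track bytes written and sleep whenever the running
// average rate would exceed the limit. Illustration only.
public class ThroughputThrottleSketch {
  private final double maxBytesPerSecond;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;

  ThroughputThrottleSketch(double maxBytesPerSecond) {
    this.maxBytesPerSecond = maxBytesPerSecond;
  }

  /** Call after writing `bytes`; sleeps if the average rate is above the limit. */
  void control(long bytes) throws InterruptedException {
    bytesWritten += bytes;
    double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
    double minSecondsNeeded = bytesWritten / maxBytesPerSecond;
    if (minSecondsNeeded > elapsedSec) {
      Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
    }
  }

  public static void main(String[] args) throws InterruptedException {
    ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50.0 * 1024 * 1024); // 50 MB/s
    for (int i = 0; i < 4; i++) {
      throttle.control(16L * 1024 * 1024); // pretend we just wrote a 16 MB chunk of compacted data
    }
  }
}
```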
2024-11-28T09:21:56,725 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:56,725 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/A, priority=13, startTime=1732785716658; duration=0sec 2024-11-28T09:21:56,725 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:56,725 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:A 2024-11-28T09:21:56,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742121_1297 (size=13425) 2024-11-28T09:21:56,730 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/9f0c8a5da1764ee89b23a2661cc58fe2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9f0c8a5da1764ee89b23a2661cc58fe2 2024-11-28T09:21:56,734 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 7dfd6fe000bc8ebd2fc5a572e1596e7a/C of 7dfd6fe000bc8ebd2fc5a572e1596e7a into 9f0c8a5da1764ee89b23a2661cc58fe2(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
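A small aside on the very long MOB file names that keep appearing (for example d41d8cd98f00b204e9800998ecf8427e20241128..._7dfd6fe000bc8ebd2fc5a572e1596e7a): the leading 32 hex characters are the MD5 of empty input, which is consistent with the name embedding a hash of the region's start key (empty for this region), followed by what looks like the date (20241128), a unique suffix, and the region's encoded name. That reading of the name is an inference from the log rather than something stated in it, but the hash part is easy to verify:

```java
// Prints d41d8cd98f00b204e9800998ecf8427e, i.e. MD5 of an empty byte array.
import java.security.MessageDigest;

public class MobNamePrefixCheck {
  public static void main(String[] args) throws Exception {
    byte[] digest = MessageDigest.getInstance("MD5").digest(new byte[0]);
    StringBuilder hex = new StringBuilder();
    for (byte b : digest) {
      hex.append(String.format("%02x", b));
    }
    System.out.println(hex);
  }
}
```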
2024-11-28T09:21:56,734 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:56,734 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a., storeName=7dfd6fe000bc8ebd2fc5a572e1596e7a/C, priority=13, startTime=1732785716658; duration=0sec 2024-11-28T09:21:56,734 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:21:56,734 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7dfd6fe000bc8ebd2fc5a572e1596e7a:C 2024-11-28T09:21:56,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-28T09:21:56,930 INFO [Thread-775 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-28T09:21:57,137 DEBUG [Thread-769 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x176c5c1b to 127.0.0.1:53251 2024-11-28T09:21:57,137 DEBUG [Thread-769 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:57,140 DEBUG [Thread-765 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1c826820 to 127.0.0.1:53251 2024-11-28T09:21:57,140 DEBUG [Thread-765 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:57,142 DEBUG [Thread-771 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x190853fc to 127.0.0.1:53251 2024-11-28T09:21:57,142 DEBUG [Thread-771 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:57,155 DEBUG [Thread-773 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x46114993 to 127.0.0.1:53251 2024-11-28T09:21:57,155 DEBUG [Thread-773 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 175 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 50 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 49 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5169 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5272 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2256 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6768 rows 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2255 2024-11-28T09:21:57,155 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6765 rows 2024-11-28T09:21:57,155 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:21:57,155 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4e560c7b to 127.0.0.1:53251 2024-11-28T09:21:57,155 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:21:57,157 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T09:21:57,158 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T09:21:57,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:57,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:57,163 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785717162"}]},"ts":"1732785717162"} 2024-11-28T09:21:57,164 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:21:57,167 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:21:57,167 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:21:57,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, UNASSIGN}] 2024-11-28T09:21:57,168 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=64, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, UNASSIGN 2024-11-28T09:21:57,169 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:21:57,170 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:21:57,170 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE; CloseRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:21:57,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:57,321 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:21:57,322 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(124): Close 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1681): Closing 7dfd6fe000bc8ebd2fc5a572e1596e7a, disabling compactions & flushes 2024-11-28T09:21:57,322 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. after waiting 0 ms 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
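Everything from "Started disable of TestAcidGuarantees" onward is teardown: DisableTableProcedure fans out into CloseTableRegionsProcedure, an UNASSIGN TransitRegionStateProcedure, and a CloseRegionProcedure on the region server, which closes the region only after one last flush of the data still in its memstores. From a client's point of view all of that is triggered by a single Admin call; a minimal sketch, assuming default connection settings, is shown below.

```java
// Hedged sketch of the client side of the disable seen above; the server-side procedure
// chain in the log is what this call kicks off.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table); // blocks until the disable procedure completes
      }
    }
  }
}
```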
2024-11-28T09:21:57,322 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(2837): Flushing 7dfd6fe000bc8ebd2fc5a572e1596e7a 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=A 2024-11-28T09:21:57,322 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:57,323 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=B 2024-11-28T09:21:57,323 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:57,323 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 7dfd6fe000bc8ebd2fc5a572e1596e7a, store=C 2024-11-28T09:21:57,323 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:21:57,329 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b86fa011ebd64e989e11e96c74cce472_7dfd6fe000bc8ebd2fc5a572e1596e7a is 50, key is test_row_0/A:col10/1732785717154/Put/seqid=0 2024-11-28T09:21:57,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742122_1298 (size=9914) 2024-11-28T09:21:57,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:57,733 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:57,737 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b86fa011ebd64e989e11e96c74cce472_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b86fa011ebd64e989e11e96c74cce472_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:57,737 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e648b5c0a5ab42c1983fa6f96635ceb7, store: [table=TestAcidGuarantees family=A region=7dfd6fe000bc8ebd2fc5a572e1596e7a] 2024-11-28T09:21:57,738 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e648b5c0a5ab42c1983fa6f96635ceb7 is 175, key is test_row_0/A:col10/1732785717154/Put/seqid=0 2024-11-28T09:21:57,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742123_1299 (size=22561) 2024-11-28T09:21:57,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:58,142 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=488, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e648b5c0a5ab42c1983fa6f96635ceb7 2024-11-28T09:21:58,149 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/b7dbe19d55214703815df7c6fa99b1eb is 50, key is test_row_0/B:col10/1732785717154/Put/seqid=0 2024-11-28T09:21:58,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742124_1300 (size=9857) 2024-11-28T09:21:58,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:58,553 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=488 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/b7dbe19d55214703815df7c6fa99b1eb 2024-11-28T09:21:58,560 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/43dd86ae03fc47ae9cc0292d62343234 is 50, key is test_row_0/C:col10/1732785717154/Put/seqid=0 2024-11-28T09:21:58,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742125_1301 (size=9857) 2024-11-28T09:21:58,964 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=488 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/43dd86ae03fc47ae9cc0292d62343234 2024-11-28T09:21:58,968 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/A/e648b5c0a5ab42c1983fa6f96635ceb7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e648b5c0a5ab42c1983fa6f96635ceb7 2024-11-28T09:21:58,971 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e648b5c0a5ab42c1983fa6f96635ceb7, entries=100, sequenceid=488, filesize=22.0 K 2024-11-28T09:21:58,971 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/B/b7dbe19d55214703815df7c6fa99b1eb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/b7dbe19d55214703815df7c6fa99b1eb 2024-11-28T09:21:58,974 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/b7dbe19d55214703815df7c6fa99b1eb, entries=100, sequenceid=488, filesize=9.6 K 2024-11-28T09:21:58,974 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/.tmp/C/43dd86ae03fc47ae9cc0292d62343234 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/43dd86ae03fc47ae9cc0292d62343234 2024-11-28T09:21:58,977 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/43dd86ae03fc47ae9cc0292d62343234, entries=100, sequenceid=488, filesize=9.6 K 2024-11-28T09:21:58,978 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 7dfd6fe000bc8ebd2fc5a572e1596e7a in 1656ms, sequenceid=488, compaction requested=false 2024-11-28T09:21:58,978 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360] to archive 2024-11-28T09:21:58,979 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
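The store closer above lists every compacted store file of family A and hands them to HFileArchiver; the DEBUG lines that follow show each file being moved from the region's data directory to the matching location under archive/, with the table/region/family layout preserved. As a rough illustration of that path mapping only (a sketch, not HBase's HFileArchiver implementation; the sample path is copied from this log):

import org.apache.hadoop.fs.Path;

public class ArchivePathExample {
  // e.g. <root>/data/default/TestAcidGuarantees/<region>/A/<hfile>
  //   -> <root>/archive/data/default/TestAcidGuarantees/<region>/A/<hfile>
  static Path toArchivePath(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // strip "<root>/" prefix
    return new Path(new Path(rootDir, "archive"), relative); // re-root under "archive/"
  }

  public static void main(String[] args) {
    Path root = new Path(
        "hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532");
    Path storeFile = new Path(root,
        "data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e");
    // Prints the same archive destination that appears in the next log entry.
    System.out.println(toArchivePath(root, storeFile));
  }
}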
2024-11-28T09:21:58,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/09a3a325e8d54edb96910c7d02943c8e 2024-11-28T09:21:58,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b488f9c3dccc445a82918f28ef505d7f 2024-11-28T09:21:58,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/71d1ec676a3d4b76aa414abb1e3877a2 2024-11-28T09:21:58,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/dc810fc4725441ebbd28481cf0164077 2024-11-28T09:21:58,984 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
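The FsDatasetAsyncDiskServiceFixer line above is unrelated to the region close: the message suggests the test utility looks up a threadGroup field by reflection, finds it missing on newer Hadoop releases, and continues (see HBASE-27595). A generic, hypothetical sketch of that tolerate-the-missing-field pattern follows; the AsyncDiskService stand-in class and its field names are invented for illustration and are not the Hadoop or HBase code:

import java.lang.reflect.Field;

public class ReflectiveFieldProbe {
  // Stand-in for a library-internal class whose shape changes between versions.
  static class AsyncDiskService {
    private Object group; // hypothetical: newer versions renamed "threadGroup" to "group"
  }

  static Field findField(Class<?> clazz, String name) {
    try {
      Field f = clazz.getDeclaredField(name);
      f.setAccessible(true);
      return f;
    } catch (NoSuchFieldException e) {
      // Field is gone in this version: report and continue as a no-op,
      // mirroring the DEBUG line in the log above.
      System.out.println("NoSuchFieldException: " + name + "; skipping fixer");
      return null;
    }
  }

  public static void main(String[] args) {
    System.out.println(findField(AsyncDiskService.class, "threadGroup")); // null, handled gracefully
    System.out.println(findField(AsyncDiskService.class, "group"));       // the renamed field
  }
}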
2024-11-28T09:21:58,984 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/64a06dc61c9b4ff0948cf6d229cd2a46 2024-11-28T09:21:58,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb655df444234c53babea1370fbc0bd4 2024-11-28T09:21:58,986 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/583b4e1dcf674169a786ce0288f845ac 2024-11-28T09:21:58,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd8be3a5a9049018d23eb3c4011cf8b 2024-11-28T09:21:58,988 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e11f2d03998b41ed98af278769030a84 2024-11-28T09:21:58,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eb21893ed82743429364bbc28c630a44 2024-11-28T09:21:58,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3424388841854f1ba988e7db88b3fcec 2024-11-28T09:21:58,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/bdd3f3044fd44d1586ccb52e15789b94 2024-11-28T09:21:58,991 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3f50cdb5395c4c9c8d43bf2edf503013 2024-11-28T09:21:58,992 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/eec067f126e24c849982a04c62e7f605 2024-11-28T09:21:58,993 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/d1174b57b40b407698e21076bcc8f71d 2024-11-28T09:21:58,994 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/15d4fcf7089a4e489cefb699f391492c 2024-11-28T09:21:58,995 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/c4cc41189a924046b3c16feb55668b72 2024-11-28T09:21:58,996 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/51c21f2dd3594b078c9ed212cd1c4111 2024-11-28T09:21:58,997 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7515824bad4e478e9671f5dfde51f526 2024-11-28T09:21:58,998 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/a52e7c7548924113b34f82844d486152 2024-11-28T09:21:58,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/ed3c405b354748c18fb2878c02d33dde 2024-11-28T09:21:59,000 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/b40fa953c50044da961651df39deecb9 2024-11-28T09:21:59,001 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/18024eb021aa45608affc648ab37e536 2024-11-28T09:21:59,002 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/f233c1d262684b249b4e44386db652e7 2024-11-28T09:21:59,003 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/969454b694574891bc3d93e14fe8c959 2024-11-28T09:21:59,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/94d1328c5cc649939b7c3e167dca2356 2024-11-28T09:21:59,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/76afa567f5ff49a089d722f8fe87c8ef 2024-11-28T09:21:59,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e674c21d774c179b78f65a7e8ef72b 2024-11-28T09:21:59,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e621f530c83b4be89f66fc094f8d14bd 2024-11-28T09:21:59,007 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/cfcc87671ddf44cd8461e29e5389ad76 2024-11-28T09:21:59,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/56e36a204f924b9684d5bab853497a1e 2024-11-28T09:21:59,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/3e338e93ad3e482b8dcf863378997c9c 2024-11-28T09:21:59,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/7b4c7a25311f49578fe572070b2ed360 2024-11-28T09:21:59,011 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c0c94a3ccb284f78a8a23b609b64d3f2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2d8d81e2eb9049e68a167a06e9c3fbe3, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/77ee0aae888248079b3973bfd93c4592, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a0db175e655d459d8019c831a6834362, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d1d20a4ff2c94714b3bc7f7c2a598f7a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/bdce3fef871b422d9f519a19bebdb0a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/16062192dff54216b2674e591b00164f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4dd01a6ceb0c4ea6b2cbf0bf27996431, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7f3c164d1d5f49f5a520762a8369063a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b] to archive 2024-11-28T09:21:59,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:21:59,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/20bcc31e1d7f4ed08a25a121279e88e8 2024-11-28T09:21:59,014 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ef5107a2c5ac4b3da197110247c08dcb 2024-11-28T09:21:59,015 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c0c94a3ccb284f78a8a23b609b64d3f2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c0c94a3ccb284f78a8a23b609b64d3f2 2024-11-28T09:21:59,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/cb8505e8c36b474484c7ca9dccc88256 2024-11-28T09:21:59,017 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c63812a0f67543b9a093d014e80c8d96 2024-11-28T09:21:59,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1d6f8adf3cef433aba3fa0153ee56251 2024-11-28T09:21:59,019 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2d8d81e2eb9049e68a167a06e9c3fbe3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2d8d81e2eb9049e68a167a06e9c3fbe3 2024-11-28T09:21:59,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4f7c9f26ae9449c3bd0c59ae48595b69 2024-11-28T09:21:59,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fabf405dea7942f990f231d61577e492 2024-11-28T09:21:59,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/77ee0aae888248079b3973bfd93c4592 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/77ee0aae888248079b3973bfd93c4592 2024-11-28T09:21:59,023 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a77f673491374add8b1d67c3157ac18d 2024-11-28T09:21:59,023 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/23fd07e16a714fa6a6ed5ac99bc75a71 2024-11-28T09:21:59,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a0db175e655d459d8019c831a6834362 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/a0db175e655d459d8019c831a6834362 2024-11-28T09:21:59,025 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/38a88dd4b26742468ab0b072787b52c8 2024-11-28T09:21:59,026 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d7905fb379b14d56b4a44a94bcd8d425 2024-11-28T09:21:59,027 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3749cffa20604a858808acdcfa7b8e90 2024-11-28T09:21:59,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d1d20a4ff2c94714b3bc7f7c2a598f7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d1d20a4ff2c94714b3bc7f7c2a598f7a 2024-11-28T09:21:59,029 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/fdd02c131fdd4664b167cc76f719e602 2024-11-28T09:21:59,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/47c3babf7dde4f2291f8eef990cdac96 2024-11-28T09:21:59,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/bdce3fef871b422d9f519a19bebdb0a4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/bdce3fef871b422d9f519a19bebdb0a4 2024-11-28T09:21:59,031 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/1f726d9518c649129c10b98236b41f30 2024-11-28T09:21:59,032 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/3c01bd2dedc04d5eab68bdd9868e7aad 2024-11-28T09:21:59,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/16062192dff54216b2674e591b00164f to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/16062192dff54216b2674e591b00164f 2024-11-28T09:21:59,034 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/2e397e6df0b44a47b12da708fe475965 2024-11-28T09:21:59,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5838d2b3054785ae4b807d7bcef092 2024-11-28T09:21:59,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4dd01a6ceb0c4ea6b2cbf0bf27996431 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/4dd01a6ceb0c4ea6b2cbf0bf27996431 2024-11-28T09:21:59,036 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/5a5de9ea9e2244f79775b985ac2418f9 2024-11-28T09:21:59,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6a3f3925c224b6582f26aaba301d01b 2024-11-28T09:21:59,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/ce95049613884950be4de942519484ce 2024-11-28T09:21:59,039 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7f3c164d1d5f49f5a520762a8369063a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7f3c164d1d5f49f5a520762a8369063a 2024-11-28T09:21:59,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/c6f2790429894ecd8a3b44401c56ebb7 2024-11-28T09:21:59,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/7d38445a760b41f19a428a679651cd73 2024-11-28T09:21:59,042 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d76dfc98567f47c7b0ca2e99b5b6549b 2024-11-28T09:21:59,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b10aa5e6c52844d498ec068d5d57c148, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a462017cafc3492aa3ad24f7b20fb5b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/463fd18b1d374bae937478c218997108, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/88c942143bfc41bbbbb24b65129892e9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f946c37335249edb71753611e995f21, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/78850dcd4b514aae8814f4e4674f3bca, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/042828c3f16b49d887fc9594d2e6c606, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ac0aade824504144ac45ed8cbe06e015, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ef6500d2efac4a588c8e4e6f102a38c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2] to archive 2024-11-28T09:21:59,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:21:59,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/d53b2e24c17c4535829e64a09d9e0098 2024-11-28T09:21:59,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/4c5ee1e1845d4d55bc6796a1582e1c41 2024-11-28T09:21:59,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b10aa5e6c52844d498ec068d5d57c148 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b10aa5e6c52844d498ec068d5d57c148 2024-11-28T09:21:59,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/77c1ac16b9ce433da956cd35c7ad7fe8 2024-11-28T09:21:59,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/30a4825ac01a4e2cbc017661982a0d88 2024-11-28T09:21:59,049 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/dc8d391059cc4eb4a325435479d7f975 2024-11-28T09:21:59,050 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a462017cafc3492aa3ad24f7b20fb5b0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a462017cafc3492aa3ad24f7b20fb5b0 2024-11-28T09:21:59,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1acffbefa4734a03be4ffb04fe65160f 2024-11-28T09:21:59,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/145084ead33646b6a4ba4f1fb4ff53e1 2024-11-28T09:21:59,053 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/463fd18b1d374bae937478c218997108 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/463fd18b1d374bae937478c218997108 2024-11-28T09:21:59,054 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/41bff7fdb32745c2b1ff945aa8b716fe 2024-11-28T09:21:59,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/b578ef462fb34230b6ccd7ade237150b 2024-11-28T09:21:59,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/88c942143bfc41bbbbb24b65129892e9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/88c942143bfc41bbbbb24b65129892e9 2024-11-28T09:21:59,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/c87c75507c7749d783e8ce112dc6a0eb 2024-11-28T09:21:59,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/2b18b563a4d2440fb9667bf7fb6aa16f 2024-11-28T09:21:59,058 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/56269bfe51b14510bd3eaa82bdc3946c 2024-11-28T09:21:59,059 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f946c37335249edb71753611e995f21 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f946c37335249edb71753611e995f21 2024-11-28T09:21:59,060 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/29be1921acf24f9ab216d93d23e02090 2024-11-28T09:21:59,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9db3db4a27734465a1f68c003db62b32 2024-11-28T09:21:59,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/78850dcd4b514aae8814f4e4674f3bca to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/78850dcd4b514aae8814f4e4674f3bca 2024-11-28T09:21:59,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/5f940e5e08c948fea63c423c13fad834 2024-11-28T09:21:59,064 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/f0d218e4b44c4a7cbc6a970821829e46 2024-11-28T09:21:59,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/042828c3f16b49d887fc9594d2e6c606 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/042828c3f16b49d887fc9594d2e6c606 2024-11-28T09:21:59,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/a91129f794ba419d99b5071cf577bda8 2024-11-28T09:21:59,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8483170b57fb42738f04d88449b0038e 2024-11-28T09:21:59,068 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ac0aade824504144ac45ed8cbe06e015 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ac0aade824504144ac45ed8cbe06e015 2024-11-28T09:21:59,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6cdef821b67f460aaeed55bdb199103a 2024-11-28T09:21:59,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/6e2d98617f524d40af318ff877cf7810 2024-11-28T09:21:59,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/8ea65d9b1e1c42b6b6e41142e067a436 2024-11-28T09:21:59,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ef6500d2efac4a588c8e4e6f102a38c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/ef6500d2efac4a588c8e4e6f102a38c4 2024-11-28T09:21:59,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/459ac525ab374d0eb1cd161c2a74d71b 2024-11-28T09:21:59,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/53a4c0336dd84b5c939b1ad23d002275 2024-11-28T09:21:59,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/1c275cc31877420dbfbb3d87f2c65ac2 2024-11-28T09:21:59,078 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits/491.seqid, newMaxSeqId=491, maxSeqId=4 2024-11-28T09:21:59,079 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a. 
2024-11-28T09:21:59,079 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] regionserver.HRegion(1635): Region close journal for 7dfd6fe000bc8ebd2fc5a572e1596e7a: 2024-11-28T09:21:59,081 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=66}] handler.UnassignRegionHandler(170): Closed 7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,081 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=7dfd6fe000bc8ebd2fc5a572e1596e7a, regionState=CLOSED 2024-11-28T09:21:59,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=65 2024-11-28T09:21:59,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=65, state=SUCCESS; CloseRegionProcedure 7dfd6fe000bc8ebd2fc5a572e1596e7a, server=363d8d38a970,33819,1732785660637 in 1.9120 sec 2024-11-28T09:21:59,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=65, resume processing ppid=64 2024-11-28T09:21:59,084 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=64, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=7dfd6fe000bc8ebd2fc5a572e1596e7a, UNASSIGN in 1.9150 sec 2024-11-28T09:21:59,085 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-28T09:21:59,086 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9170 sec 2024-11-28T09:21:59,087 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785719086"}]},"ts":"1732785719086"} 2024-11-28T09:21:59,087 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:21:59,089 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:21:59,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9320 sec 2024-11-28T09:21:59,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-11-28T09:21:59,266 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 63 completed 2024-11-28T09:21:59,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:21:59,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,268 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=67, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T09:21:59,268 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for 
pid=67, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,270 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,272 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits] 2024-11-28T09:21:59,274 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/184bb218d7b94e95aa47b6d96338d514 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/184bb218d7b94e95aa47b6d96338d514 2024-11-28T09:21:59,275 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e648b5c0a5ab42c1983fa6f96635ceb7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/A/e648b5c0a5ab42c1983fa6f96635ceb7 2024-11-28T09:21:59,277 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/b7dbe19d55214703815df7c6fa99b1eb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/b7dbe19d55214703815df7c6fa99b1eb 2024-11-28T09:21:59,278 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d581517048a64871acbff47236e76b2a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/B/d581517048a64871acbff47236e76b2a 2024-11-28T09:21:59,280 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/43dd86ae03fc47ae9cc0292d62343234 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/43dd86ae03fc47ae9cc0292d62343234 2024-11-28T09:21:59,281 
DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9f0c8a5da1764ee89b23a2661cc58fe2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/C/9f0c8a5da1764ee89b23a2661cc58fe2 2024-11-28T09:21:59,283 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits/491.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a/recovered.edits/491.seqid 2024-11-28T09:21:59,284 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,284 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:21:59,284 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:21:59,285 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T09:21:59,289 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280f613b5d84184a07954add463d2e5ed4_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280f613b5d84184a07954add463d2e5ed4_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,290 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112818f4f0bc95b841aba43f7d1bea75fbe9_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112818f4f0bc95b841aba43f7d1bea75fbe9_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,291 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282bbd88f1533a4249ac020ba2f1547def_7dfd6fe000bc8ebd2fc5a572e1596e7a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282bbd88f1533a4249ac020ba2f1547def_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,292 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283f1ecfc2ae934c2a8bed2c025013dd56_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283f1ecfc2ae934c2a8bed2c025013dd56_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,293 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112842a768492e644cda8863ed995543b9e0_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112842a768492e644cda8863ed995543b9e0_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,294 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284a731bd733934238b912325f1c64bc77_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284a731bd733934238b912325f1c64bc77_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,295 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284e262d005a094142815fc2940ddf2969_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284e262d005a094142815fc2940ddf2969_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,297 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284f0a22667f544503994be60753f3ed88_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411284f0a22667f544503994be60753f3ed88_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,298 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112855810024491442ee8aa888a900725b3a_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112855810024491442ee8aa888a900725b3a_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,298 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285a0669b935974ef3a69508668433d7ab_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285a0669b935974ef3a69508668433d7ab_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,299 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e2105a96d244d25b796a5a67cf8b06f_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285e2105a96d244d25b796a5a67cf8b06f_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,301 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285f7d79336e124fe0ae02d24b301c5086_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285f7d79336e124fe0ae02d24b301c5086_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,302 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286b8e82e4b56448639640867b7d89915b_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286b8e82e4b56448639640867b7d89915b_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,303 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287eb206f27b2a45b4ba776b642905f232_7dfd6fe000bc8ebd2fc5a572e1596e7a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287eb206f27b2a45b4ba776b642905f232_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,303 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128857740b716de4a199afd661f50041335_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128857740b716de4a199afd661f50041335_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,305 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885ec8c4b1a114b80912b336108932fa8_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112885ec8c4b1a114b80912b336108932fa8_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,305 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112895835dae24c14c6f837c2daebbdd2ebf_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112895835dae24c14c6f837c2daebbdd2ebf_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,306 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898f4fcf0abac41238fb57537c923491c_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112898f4fcf0abac41238fb57537c923491c_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,308 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b86fa011ebd64e989e11e96c74cce472_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b86fa011ebd64e989e11e96c74cce472_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,309 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bb2749a789f54bfc877ea58a86fff8d7_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128bb2749a789f54bfc877ea58a86fff8d7_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,310 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128be365341446841f9a74ebb24a4f6e343_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128be365341446841f9a74ebb24a4f6e343_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,311 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128df090fbcb22740d280acfc7643ed88d5_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128df090fbcb22740d280acfc7643ed88d5_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,312 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ec41996a54bf4cb0bc286a7c34d72fbb_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ec41996a54bf4cb0bc286a7c34d72fbb_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,313 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed9d22d036ee4b24a358acf8edebc34e_7dfd6fe000bc8ebd2fc5a572e1596e7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ed9d22d036ee4b24a358acf8edebc34e_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,314 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ffff4d69422e4721bdd63720718b43b8_7dfd6fe000bc8ebd2fc5a572e1596e7a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ffff4d69422e4721bdd63720718b43b8_7dfd6fe000bc8ebd2fc5a572e1596e7a 2024-11-28T09:21:59,314 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:21:59,316 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=67, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,319 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:21:59,321 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T09:21:59,322 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=67, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,322 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:21:59,322 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785719322"}]},"ts":"9223372036854775807"} 2024-11-28T09:21:59,324 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:21:59,324 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7dfd6fe000bc8ebd2fc5a572e1596e7a, NAME => 'TestAcidGuarantees,,1732785691407.7dfd6fe000bc8ebd2fc5a572e1596e7a.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:21:59,324 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-28T09:21:59,324 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785719324"}]},"ts":"9223372036854775807"} 2024-11-28T09:21:59,326 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:21:59,328 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=67, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,329 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 62 msec 2024-11-28T09:21:59,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=67 2024-11-28T09:21:59,369 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 67 completed 2024-11-28T09:21:59,379 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=242 (was 240) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1675825963_22 at /127.0.0.1:53424 [Waiting for operation #1011] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1675825963_22 at /127.0.0.1:57426 [Waiting for operation #1498] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x4ff0f410-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-54249782_22 at /127.0.0.1:53226 [Waiting for operation #1131] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) 
java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-54249782_22 at /127.0.0.1:53410 [Waiting for operation #1012] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=466 (was 458) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=492 (was 382) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4370 (was 4901) 2024-11-28T09:21:59,387 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=242, OpenFileDescriptor=466, MaxFileDescriptor=1048576, SystemLoadAverage=492, ProcessCount=11, AvailableMemoryMB=4370 2024-11-28T09:21:59,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-28T09:21:59,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:21:59,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:21:59,390 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:21:59,390 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:21:59,390 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 68 2024-11-28T09:21:59,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:21:59,391 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:21:59,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742126_1302 (size=960) 2024-11-28T09:21:59,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:21:59,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:21:59,798 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:21:59,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742127_1303 (size=53) 2024-11-28T09:21:59,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0ad9609950a045418498b830dd929869, disabling compactions & flushes 2024-11-28T09:22:00,204 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. after waiting 0 ms 2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,204 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:00,204 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:00,205 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:22:00,205 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785720205"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785720205"}]},"ts":"1732785720205"} 2024-11-28T09:22:00,206 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T09:22:00,207 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:22:00,207 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785720207"}]},"ts":"1732785720207"} 2024-11-28T09:22:00,208 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:22:00,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, ASSIGN}] 2024-11-28T09:22:00,212 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, ASSIGN 2024-11-28T09:22:00,213 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=69, ppid=68, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:22:00,363 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=0ad9609950a045418498b830dd929869, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:00,364 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; OpenRegionProcedure 0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:00,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T09:22:00,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:22:00,516 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:00,519 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(135): 
Open TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,519 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7285): Opening region: {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:22:00,519 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,519 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:00,520 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7327): checking encryption for 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,520 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(7330): checking classloading for 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,521 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,522 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:00,522 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ad9609950a045418498b830dd929869 columnFamilyName A 2024-11-28T09:22:00,522 DEBUG [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:00,523 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(327): Store=0ad9609950a045418498b830dd929869/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:00,523 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,524 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:00,524 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ad9609950a045418498b830dd929869 columnFamilyName B 2024-11-28T09:22:00,524 DEBUG [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:00,524 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(327): Store=0ad9609950a045418498b830dd929869/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:00,524 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,525 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:00,526 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0ad9609950a045418498b830dd929869 columnFamilyName C 2024-11-28T09:22:00,526 DEBUG [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:00,526 INFO [StoreOpener-0ad9609950a045418498b830dd929869-1 {}] regionserver.HStore(327): 
Store=0ad9609950a045418498b830dd929869/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:00,526 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,527 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,527 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,528 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:22:00,529 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1085): writing seq id for 0ad9609950a045418498b830dd929869 2024-11-28T09:22:00,531 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:22:00,531 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1102): Opened 0ad9609950a045418498b830dd929869; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62274035, jitterRate=-0.07204456627368927}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:22:00,532 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegion(1001): Region open journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:00,533 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., pid=70, masterSystemTime=1732785720516 2024-11-28T09:22:00,534 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:00,534 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=70}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:00,534 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=69 updating hbase:meta row=0ad9609950a045418498b830dd929869, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:00,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-28T09:22:00,537 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; OpenRegionProcedure 0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 in 171 msec 2024-11-28T09:22:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-11-28T09:22:00,538 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, ASSIGN in 326 msec 2024-11-28T09:22:00,539 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:22:00,539 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785720539"}]},"ts":"1732785720539"} 2024-11-28T09:22:00,539 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:22:00,542 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=68, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:22:00,543 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1530 sec 2024-11-28T09:22:01,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-11-28T09:22:01,495 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 68 completed 2024-11-28T09:22:01,496 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f66057f to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@53bfce45 2024-11-28T09:22:01,500 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64dc42d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,501 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,502 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45242, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,503 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:22:01,504 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55058, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:22:01,506 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17b6adc5 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3a569490 2024-11-28T09:22:01,509 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c1ac389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,510 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x669e1999 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6862e3ce 2024-11-28T09:22:01,513 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28e73c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,514 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72aa9ee5 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d296fed 2024-11-28T09:22:01,517 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c480dfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,518 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4ec09297 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@8d0caa5 2024-11-28T09:22:01,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34cb3991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,521 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4dfb20f6 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@43f04e0e 2024-11-28T09:22:01,524 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e9ae050, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,525 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x560ec309 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fef31f8 2024-11-28T09:22:01,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14ed1e44, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,532 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5886c0f2 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@eb04aeb 2024-11-28T09:22:01,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@72537a47, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,536 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x66e575aa to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6a0e9c8f 2024-11-28T09:22:01,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36642cb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x131ceb8f to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@d68f787 2024-11-28T09:22:01,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c299cfb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5a78bf6d to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@10e6bf6a 2024-11-28T09:22:01,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605827c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:01,549 DEBUG [hconnection-0x7caf21b4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,550 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45252, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,552 DEBUG [hconnection-0x871af8d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,553 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45266, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,553 DEBUG [hconnection-0x43b46f20-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,554 INFO [RS-EventLoopGroup-3-3 {}] 
ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,560 DEBUG [hconnection-0x45f1c47d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,561 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45298, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:01,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:01,565 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:01,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:01,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-28T09:22:01,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:01,576 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:01,576 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:01,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:01,584 DEBUG [hconnection-0x7496089a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,584 DEBUG [hconnection-0x587bab53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,585 DEBUG [hconnection-0x5c24314-metaLookup-shared--pool-0 {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,586 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,586 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45314, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,587 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45334, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,592 DEBUG [hconnection-0x1bd73203-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,592 DEBUG [hconnection-0x100fe920-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,593 DEBUG [hconnection-0x1fe6947a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:01,593 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45338, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,593 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45340, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,596 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,596 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45346, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:01,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785781588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785781589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785781589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,599 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785781595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,605 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785781600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/675b60891b534886a24e8a9762d211ea is 50, key is test_row_0/A:col10/1732785721561/Put/seqid=0 2024-11-28T09:22:01,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742128_1304 (size=12001) 2024-11-28T09:22:01,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:01,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785781698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,702 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785781698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785781699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785781701, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785781706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,728 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:01,729 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:01,882 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,882 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:01,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:01,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:01,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:01,883 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:01,906 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785781904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785781905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785781905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785781906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:01,911 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:01,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785781909, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,036 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,037 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:02,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:02,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,037 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/675b60891b534886a24e8a9762d211ea 2024-11-28T09:22:02,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5aa7972ff7a44451922f7cf044a63a9d is 50, key is test_row_0/B:col10/1732785721561/Put/seqid=0 2024-11-28T09:22:02,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742129_1305 (size=12001) 2024-11-28T09:22:02,135 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5aa7972ff7a44451922f7cf044a63a9d 2024-11-28T09:22:02,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/11e9418a3168477dad67d93e2b38b674 is 50, key is test_row_0/C:col10/1732785721561/Put/seqid=0 2024-11-28T09:22:02,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:02,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742130_1306 (size=12001) 
2024-11-28T09:22:02,189 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:02,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:02,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,193 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:02,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,210 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785782209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,210 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785782209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785782211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,213 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785782211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785782216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,345 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,346 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,346 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,499 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:02,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:02,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:02,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] handler.RSProcedureHandler(58): pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=72 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=72 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:02,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/11e9418a3168477dad67d93e2b38b674 2024-11-28T09:22:02,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/675b60891b534886a24e8a9762d211ea as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea 2024-11-28T09:22:02,594 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:22:02,595 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5aa7972ff7a44451922f7cf044a63a9d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d 2024-11-28T09:22:02,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d, entries=150, sequenceid=12, 
filesize=11.7 K 2024-11-28T09:22:02,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/11e9418a3168477dad67d93e2b38b674 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674 2024-11-28T09:22:02,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:22:02,605 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. There is no need to updateReaders 2024-11-28T09:22:02,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 0ad9609950a045418498b830dd929869 in 1041ms, sequenceid=12, compaction requested=false 2024-11-28T09:22:02,606 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:02,653 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,653 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-28T09:22:02,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
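
The records above show the master's FlushRegionProcedure (pid=72) being re-dispatched to the regionserver while the MemStoreFlusher finishes its own flush of region 0ad9609950a045418498b830dd929869 and commits the A/B/C store files; further down, concurrent writes are pushed back with RegionTooBusyException once the region's memstore passes its 512.0 K limit. As a point of reference, below is a minimal client-side sketch of the same interaction, assuming the standard hbase-client API and an hbase-site.xml on the classpath; the class name FlushAndRetryExample, the row value, the retry count and the backoff intervals are illustrative and not part of this test.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushAndRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();           // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees");  // table name taken from the log above

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table t = conn.getTable(table)) {

          // Ask the master to flush the table; this is the client-side call behind the
          // FlushTableProcedure / FlushRegionProcedure records seen in this log.
          admin.flush(table);

          // Row/family/qualifier mirror the test rows in the log (test_row_0, A:col10);
          // the cell value here is a placeholder.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

          // The server rejects writes with RegionTooBusyException ("Over memstore limit=512.0 K")
          // while the region is above its memstore limit. The client normally retries this
          // internally, but once its own retries are exhausted the failure surfaces here,
          // where a simple back-off-and-retry loop can absorb the transient pressure.
          for (int attempt = 1; ; attempt++) {
            try {
              t.put(put);
              break;
            } catch (IOException e) {
              if (attempt >= 5) throw e;       // give up after a few attempts
              Thread.sleep(200L * attempt);    // linear backoff between attempts
            }
          }
        }
      }
    }

The admin.flush(table) call corresponds to the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request and the FlushTableProcedure records that appear later in this log.
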
2024-11-28T09:22:02,654 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:22:02,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:02,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:02,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:02,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:02,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:02,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:02,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/5f9446a79bca454e92dc8822f5d12911 is 50, key is test_row_0/A:col10/1732785721585/Put/seqid=0 2024-11-28T09:22:02,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:02,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742131_1307 (size=12001) 2024-11-28T09:22:02,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:02,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:02,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,723 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785782720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785782720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785782721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785782721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785782721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785782824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,826 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785782825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785782825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:02,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:02,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785782825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785783027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785783028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785783029, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,031 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785783030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,095 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/5f9446a79bca454e92dc8822f5d12911 2024-11-28T09:22:03,104 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e909d4f92e5042fe8cdf89a20162b580 is 50, key is test_row_0/B:col10/1732785721585/Put/seqid=0 2024-11-28T09:22:03,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742132_1308 (size=12001) 2024-11-28T09:22:03,135 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e909d4f92e5042fe8cdf89a20162b580 2024-11-28T09:22:03,143 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/43ad14a09ef24684aacf717f307bd2a0 is 50, key is test_row_0/C:col10/1732785721585/Put/seqid=0 2024-11-28T09:22:03,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742133_1309 (size=12001) 2024-11-28T09:22:03,174 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/43ad14a09ef24684aacf717f307bd2a0 2024-11-28T09:22:03,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/5f9446a79bca454e92dc8822f5d12911 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911 2024-11-28T09:22:03,185 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T09:22:03,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e909d4f92e5042fe8cdf89a20162b580 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580 2024-11-28T09:22:03,195 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T09:22:03,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/43ad14a09ef24684aacf717f307bd2a0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0 2024-11-28T09:22:03,203 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0, entries=150, sequenceid=38, filesize=11.7 K 2024-11-28T09:22:03,205 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 0ad9609950a045418498b830dd929869 in 551ms, sequenceid=38, compaction requested=false 2024-11-28T09:22:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 
{event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:03,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:03,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-28T09:22:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-28T09:22:03,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-28T09:22:03,209 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6310 sec 2024-11-28T09:22:03,211 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.6350 sec 2024-11-28T09:22:03,334 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:03,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:03,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:03,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:03,335 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:03,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/cb877df785dd438aa7ebb9fa0ee7b34f is 50, key is test_row_0/A:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:03,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742134_1310 (size=12001) 2024-11-28T09:22:03,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785783370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785783370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,377 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785783375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,380 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785783376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,480 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785783478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785783478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785783479, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,483 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785783481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-28T09:22:03,680 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-28T09:22:03,682 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:03,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-28T09:22:03,684 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:03,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-28T09:22:03,685 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:03,685 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:03,685 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to 
exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785783683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,687 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785783683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785783684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,704 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785783683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:03,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785783733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/cb877df785dd438aa7ebb9fa0ee7b34f 2024-11-28T09:22:03,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-28T09:22:03,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/f9479b2fbf3c4722b216b8190393aa5f is 50, key is test_row_0/B:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:03,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742135_1311 (size=12001) 2024-11-28T09:22:03,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/f9479b2fbf3c4722b216b8190393aa5f 2024-11-28T09:22:03,828 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/56935dcd958e461c856d620ff2bf2aa8 is 50, key is test_row_0/C:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:03,836 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,837 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-28T09:22:03,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
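
Editor's note: the repeated RegionTooBusyException / "Over memstore limit=512.0 K" records above are the region server rejecting Mutate calls in HRegion.checkResources while the memstore sits above its blocking threshold and the flush is still in flight. The HBase client normally retries this exception on its own, but a writer that wants explicit control over the backoff can handle it directly. The following is only a minimal sketch, not code from this test: the table name and column family are taken from the log (TestAcidGuarantees, family A), while the row value and backoff policy are assumptions; depending on client retry settings the exception may also arrive wrapped in a retries-exhausted exception.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // assumed starting backoff
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);                   // same code path the blocked Mutate calls take
          return;                           // write accepted
        } catch (RegionTooBusyException e) {
          // Region is over its memstore blocking limit; give the flush time to catch up.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IOException("region stayed too busy after retries");
    }
  }
}
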
2024-11-28T09:22:03,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:03,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:03,837 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] handler.RSProcedureHandler(58): pid=74 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:03,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=74 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
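
Editor's note: the 512.0 K blocking limit quoted in those exceptions is the region's memstore flush size multiplied by the block multiplier; this test evidently runs with a very small flush size so checkResources trips quickly. As a rough illustration only (the values below are the usual defaults, not this test's configuration, and the property names are quoted from memory):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimits {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed (default 128 MB).
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Multiplier above which further writes are rejected with RegionTooBusyException (default 4).
    int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("writes block once the memstore exceeds ~"
        + (flushSize * blockMultiplier) + " bytes");
  }
}
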
2024-11-28T09:22:03,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=74 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
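
Editor's note: the "Unable to complete flush ... as already flushing" failure above is the procedure-driven flush colliding with the MemStoreFlusher's own flush; the master records the RemoteProcedureException and re-dispatches the FlushRegionProcedure, which succeeds later in the log. The flush requests themselves come from the test client, as the "HBaseAdmin$TableFuture ... Operation: FLUSH ... completed" lines show. A minimal client-side equivalent, assuming a reachable cluster with the same table name, would be:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Schedules a FlushTableProcedure on the master and waits for it to finish,
      // which is what produces the "Operation: FLUSH ... completed" records above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
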
2024-11-28T09:22:03,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742136_1312 (size=12001) 2024-11-28T09:22:03,862 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/56935dcd958e461c856d620ff2bf2aa8 2024-11-28T09:22:03,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/cb877df785dd438aa7ebb9fa0ee7b34f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f 2024-11-28T09:22:03,877 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f, entries=150, sequenceid=51, filesize=11.7 K 2024-11-28T09:22:03,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/f9479b2fbf3c4722b216b8190393aa5f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f 2024-11-28T09:22:03,893 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f, entries=150, sequenceid=51, filesize=11.7 K 2024-11-28T09:22:03,894 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/56935dcd958e461c856d620ff2bf2aa8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8 2024-11-28T09:22:03,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8, entries=150, sequenceid=51, filesize=11.7 K 2024-11-28T09:22:03,900 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0ad9609950a045418498b830dd929869 in 566ms, sequenceid=51, compaction requested=true 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction 
store size is 1 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:03,900 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:03,900 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:03,900 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:03,903 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:03,903 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:03,903 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
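
Editor's note: the "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines are the exploring compaction policy firing as soon as a store reaches the minimum file count, and the "16 blocking" figure is the blocking-store-files ceiling. The property names in the sketch below are the usual ones for these thresholds (quoted from memory, so verify them against your HBase version); this shows how they would be tuned, not what this test sets:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuning {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of store files before a minor compaction is considered (default 3,
    // which is why the three 11.7 K flush outputs above were selected immediately).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files compacted in one pass (default 10).
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Once a store holds this many files, further flushes are delayed (default 16,
    // matching the "16 blocking" figure in the log).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min=" + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}
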
2024-11-28T09:22:03,903 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.2 K 2024-11-28T09:22:03,904 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:03,904 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:03,904 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:03,905 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.2 K 2024-11-28T09:22:03,905 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 675b60891b534886a24e8a9762d211ea, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785721558 2024-11-28T09:22:03,905 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5aa7972ff7a44451922f7cf044a63a9d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785721558 2024-11-28T09:22:03,906 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5f9446a79bca454e92dc8822f5d12911, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732785721585 2024-11-28T09:22:03,906 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e909d4f92e5042fe8cdf89a20162b580, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732785721585 2024-11-28T09:22:03,907 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting cb877df785dd438aa7ebb9fa0ee7b34f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:03,907 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f9479b2fbf3c4722b216b8190393aa5f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:03,922 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#258 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:03,923 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/42cb03a9f89e4a58829b79da7f541aff is 50, key is test_row_0/A:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:03,926 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#259 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:03,927 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/602bef74cfc04108bebe4b16bc59e4cc is 50, key is test_row_0/B:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:03,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742137_1313 (size=12104) 2024-11-28T09:22:03,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742138_1314 (size=12104) 2024-11-28T09:22:03,991 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:03,991 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-28T09:22:03,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
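
Editor's note: the PressureAwareThroughputController lines show compaction writes being rate-limited; with no flush pressure the limit sits at the 50 MB/s lower bound reported above. If memory serves, the bounds are configurable through the properties sketched below, but both the key names and defaults should be checked against the version in use before relying on them:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughput {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Lower bound used when the cluster is idle -- 50 MB/s, the "total limit" seen in the log.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    // Upper bound applied as flush pressure rises.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    System.out.println("compaction throughput bounds set (assumed property names)");
  }
}
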
2024-11-28T09:22:03,992 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:22:03,992 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/602bef74cfc04108bebe4b16bc59e4cc as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/602bef74cfc04108bebe4b16bc59e4cc 2024-11-28T09:22:03,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-28T09:22:03,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:03,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:03,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:03,996 DEBUG [master/363d8d38a970:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 4cf15397e80ca5505a26ba84c5dd3d7f changed from -1.0 to 0.0, refreshing cache 2024-11-28T09:22:03,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:03,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:03,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:03,999 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 602bef74cfc04108bebe4b16bc59e4cc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
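
Editor's note: the CompactingMemStore "FLUSHING TO DISK ... store=A/B/C" and "Swapping pipeline suffix" records indicate the table's families use the compacting (in-memory compaction) memstore rather than the default one. Outside this test that behaviour is normally chosen per column family; the sketch below shows one plausible way to request it at table-creation time, with the builder calls and policy value taken from the 2.x client API as I recall it, so treat it as an assumption and verify:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateCompactingMemstoreTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      for (String family : new String[] {"A", "B", "C"}) {
        table.setColumnFamily(
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                // BASIC keeps flattened in-memory segments in a pipeline, consistent with
                // the CompactingMemStore records in the log above.
                .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                .build());
      }
      admin.createTable(table.build());
    }
  }
}
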
2024-11-28T09:22:03,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:03,999 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785723900; duration=0sec 2024-11-28T09:22:03,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:03,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:03,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:04,002 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:04,002 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:04,002 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,002 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.2 K 2024-11-28T09:22:04,002 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 11e9418a3168477dad67d93e2b38b674, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785721558 2024-11-28T09:22:04,002 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 43ad14a09ef24684aacf717f307bd2a0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1732785721585 2024-11-28T09:22:04,003 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 56935dcd958e461c856d620ff2bf2aa8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:04,018 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785784015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,019 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785784015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,022 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785784018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785784019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/f255eb4b525f4ba19001249f045636c4 is 50, key is test_row_0/A:col10/1732785723991/Put/seqid=0 2024-11-28T09:22:04,037 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#261 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:04,037 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/ebfce1014db04b60a7fb3decd876fc5c is 50, key is test_row_0/C:col10/1732785723332/Put/seqid=0 2024-11-28T09:22:04,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742140_1316 (size=12001) 2024-11-28T09:22:04,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742139_1315 (size=12104) 2024-11-28T09:22:04,094 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/f255eb4b525f4ba19001249f045636c4 2024-11-28T09:22:04,122 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/330c9c9716d34458b951df4bbb700357 is 50, key is test_row_0/B:col10/1732785723991/Put/seqid=0 2024-11-28T09:22:04,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785784120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,123 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785784120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785784124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785784124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742141_1317 (size=12001) 2024-11-28T09:22:04,158 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/330c9c9716d34458b951df4bbb700357 2024-11-28T09:22:04,186 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/08ad9ed33b13488ba0fc21b6d37da973 is 50, key is test_row_0/C:col10/1732785723991/Put/seqid=0 2024-11-28T09:22:04,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742142_1318 (size=12001) 2024-11-28T09:22:04,243 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/08ad9ed33b13488ba0fc21b6d37da973 2024-11-28T09:22:04,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/f255eb4b525f4ba19001249f045636c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4 2024-11-28T09:22:04,255 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:04,256 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/330c9c9716d34458b951df4bbb700357 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357 2024-11-28T09:22:04,261 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:04,262 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/08ad9ed33b13488ba0fc21b6d37da973 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973 2024-11-28T09:22:04,275 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:04,276 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0ad9609950a045418498b830dd929869 in 283ms, sequenceid=75, compaction requested=false 2024-11-28T09:22:04,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:04,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:04,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-28T09:22:04,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-28T09:22:04,281 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-28T09:22:04,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 594 msec 2024-11-28T09:22:04,283 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 600 msec 2024-11-28T09:22:04,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-28T09:22:04,296 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-28T09:22:04,298 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:04,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-28T09:22:04,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:04,302 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:04,302 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:04,303 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:04,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:04,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:04,329 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:04,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:04,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/aef9b9acca704228ae05a3416c02af30 is 50, key is test_row_0/A:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:04,367 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/42cb03a9f89e4a58829b79da7f541aff as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/42cb03a9f89e4a58829b79da7f541aff 2024-11-28T09:22:04,393 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 42cb03a9f89e4a58829b79da7f541aff(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:04,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:04,393 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785723900; duration=0sec 2024-11-28T09:22:04,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:04,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:04,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742143_1319 (size=16681) 2024-11-28T09:22:04,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:04,405 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785784394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785784394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785784409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,411 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785784409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,455 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:04,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:04,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,456 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:04,456 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,500 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/ebfce1014db04b60a7fb3decd876fc5c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ebfce1014db04b60a7fb3decd876fc5c 2024-11-28T09:22:04,505 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into ebfce1014db04b60a7fb3decd876fc5c(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:04,505 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:04,505 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785723900; duration=0sec 2024-11-28T09:22:04,505 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:04,505 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:04,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785784507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785784507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,514 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785784512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,520 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785784516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:04,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,610 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:04,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:04,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,611 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:04,611 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,649 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T09:22:04,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785784710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785784710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,718 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785784716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:04,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785784721, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,763 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,764 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:04,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:04,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,764 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,797 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/aef9b9acca704228ae05a3416c02af30 2024-11-28T09:22:04,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ec9e224603aa47dd98acae6fe22ca709 is 50, key is test_row_0/B:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:04,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742144_1320 (size=12001) 2024-11-28T09:22:04,867 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ec9e224603aa47dd98acae6fe22ca709 2024-11-28T09:22:04,881 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/65b20c2379a348588d46062bb9801e2d is 50, key is test_row_0/C:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:04,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:04,917 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:04,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742145_1321 (size=12001) 2024-11-28T09:22:04,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
as already flushing 2024-11-28T09:22:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:04,921 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:04,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:05,022 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785785015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,023 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785785016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785785020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785785026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,075 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,075 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:05,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:05,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
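The entries above show the master repeatedly dispatching FlushRegionProcedure pid=76 to the region server while MemStoreFlusher.0 is still mid-flush: each attempt hits the "NOT flushing ... as already flushing" guard, throws "Unable to complete flush", and is reported back as a failed remote procedure, after which the master re-dispatches it. A minimal sketch of that guard pattern follows; it is an illustration of the behaviour visible in the log, not the real FlushRegionCallable source, and the helper names (isFlushInProgress, requestFlush, regionInfo) are assumptions made for the example.

import java.io.IOException;

final class FlushAttemptSketch {
  interface RegionHandle {
    boolean isFlushInProgress();                 // assumed accessor, not the HBase API
    boolean requestFlush() throws IOException;   // assumed flush trigger
    String regionInfo();                         // e.g. "{ENCODED => 0ad96..., NAME => 'TestAcidGuarantees,,...'}"
  }

  /** Mirrors the pattern in the log: refuse the remote flush while one is already running. */
  static void doCall(RegionHandle region) throws IOException {
    if (region.isFlushInProgress()) {
      // The region server logs "NOT flushing ... as already flushing" and fails the
      // procedure; the master keeps re-dispatching pid=76 until a later attempt succeeds.
      throw new IOException("Unable to complete flush " + region.regionInfo());
    }
    region.requestFlush();
  }
}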
2024-11-28T09:22:05,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:05,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:05,240 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:05,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:05,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,241 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:05,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:05,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
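The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") mean the region's memstore has crossed its blocking threshold, so incoming Mutate calls are rejected until the in-flight flush drains it; in this test that pressure is intentional. Below is a hedged sketch of how a caller could back off on that exception. In practice the HBase client normally retries it internally, so the explicit loop only makes the back-off visible; the table, row, and column names are copied from the log, while the retry parameters (5 attempts, 200 ms base delay) are arbitrary assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                 // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) throw e;      // give up after a few tries
          Thread.sleep(200L * attempt);   // linear back-off while the flush drains the memstore
        }
      }
    }
  }
}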
2024-11-28T09:22:05,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=90 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/65b20c2379a348588d46062bb9801e2d 2024-11-28T09:22:05,376 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/aef9b9acca704228ae05a3416c02af30 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30 2024-11-28T09:22:05,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30, entries=250, sequenceid=90, filesize=16.3 K 2024-11-28T09:22:05,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ec9e224603aa47dd98acae6fe22ca709 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709 2024-11-28T09:22:05,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709, entries=150, sequenceid=90, filesize=11.7 K 2024-11-28T09:22:05,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/65b20c2379a348588d46062bb9801e2d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d 2024-11-28T09:22:05,390 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d, entries=150, sequenceid=90, filesize=11.7 K 2024-11-28T09:22:05,391 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0ad9609950a045418498b830dd929869 in 1062ms, sequenceid=90, compaction requested=true 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:05,392 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:05,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:05,392 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:05,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40786 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:05,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:05,393 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:05,393 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/42cb03a9f89e4a58829b79da7f541aff, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=39.8 K 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 42cb03a9f89e4a58829b79da7f541aff, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:05,394 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
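Once the flush lands a third HFile in each store, the ExploringCompactionPolicy entries above select all three files ("3 eligible, 16 blocking", about 40 K per store) for a minor compaction of A, B, and C. The sketch below shows the commonly documented configuration keys that drive that selection; it is a tuning illustration, not part of the test, and the key names and values should be checked against the HBase version in use, since defaults can drift between releases.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // min files before a minor compaction (the "3 eligible" above)
    conf.setInt("hbase.hstore.compaction.max", 10);       // cap on files merged in one pass
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
    return conf;
  }
}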
2024-11-28T09:22:05,394 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/602bef74cfc04108bebe4b16bc59e4cc, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.3 K 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f255eb4b525f4ba19001249f045636c4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785723368 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 602bef74cfc04108bebe4b16bc59e4cc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:05,394 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting aef9b9acca704228ae05a3416c02af30, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:05,395 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 330c9c9716d34458b951df4bbb700357, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785723368 2024-11-28T09:22:05,395 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ec9e224603aa47dd98acae6fe22ca709, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:05,396 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,397 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-28T09:22:05,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:05,397 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:22:05,397 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:05,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:05,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:05,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:05,411 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#267 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:05,412 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/16a9bfcdd6124e1980c7a6a70a6b1fc0 is 50, key is test_row_0/A:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:05,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2cbc3d42bc2147a89a21abd7719b0e9f is 50, key is test_row_0/A:col10/1732785724394/Put/seqid=0 2024-11-28T09:22:05,439 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:05,440 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/1bf26449135f43eb82eed2d2f79adfba is 50, key is test_row_0/B:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:05,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742146_1322 (size=12207) 2024-11-28T09:22:05,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742147_1323 (size=12001) 2024-11-28T09:22:05,511 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2cbc3d42bc2147a89a21abd7719b0e9f 2024-11-28T09:22:05,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:05,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:05,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742148_1324 (size=12207) 2024-11-28T09:22:05,535 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/1bf26449135f43eb82eed2d2f79adfba as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/1bf26449135f43eb82eed2d2f79adfba 2024-11-28T09:22:05,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/93392686e147409c81a2aecbaa1e082f is 50, key is test_row_0/B:col10/1732785724394/Put/seqid=0 2024-11-28T09:22:05,540 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 1bf26449135f43eb82eed2d2f79adfba(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
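The compactions above each merge three ~12 K HFiles into a single ~11.9 K file per store, while writes keep bouncing off the 512.0 K memstore limit until the flush completes. That limit is the point where HBase blocks writes, roughly the per-region flush size multiplied by the block multiplier (the hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier settings). The arithmetic sketch below uses assumed numbers: a 128 KB flush size with the default multiplier of 4 would produce the 512 KB figure seen in this test, but the log alone does not confirm either value.

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    long assumedFlushSize = 128L * 1024;   // hypothetical per-region flush size for this test
    long blockMultiplier = 4L;             // documented default for the block multiplier
    long blockingLimit = assumedFlushSize * blockMultiplier;
    System.out.println(blockingLimit + " bytes = " + (blockingLimit / 1024) + " K blocking limit");
  }
}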
2024-11-28T09:22:05,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:05,540 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785725392; duration=0sec 2024-11-28T09:22:05,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:05,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:05,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:05,542 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:05,542 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:05,542 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,542 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ebfce1014db04b60a7fb3decd876fc5c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.3 K 2024-11-28T09:22:05,543 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ebfce1014db04b60a7fb3decd876fc5c, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785722719 2024-11-28T09:22:05,544 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 08ad9ed33b13488ba0fc21b6d37da973, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785723368 2024-11-28T09:22:05,544 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 65b20c2379a348588d46062bb9801e2d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:05,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785785552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785785558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785785559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,565 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785785559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,574 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#271 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:05,575 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4b69419dee204a189c25e7584117aa1e is 50, key is test_row_0/C:col10/1732785724001/Put/seqid=0 2024-11-28T09:22:05,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742149_1325 (size=12001) 2024-11-28T09:22:05,580 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/93392686e147409c81a2aecbaa1e082f 2024-11-28T09:22:05,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d5c3bb0cb7a949dfa2cc054ac09e8493 is 50, key is test_row_0/C:col10/1732785724394/Put/seqid=0 2024-11-28T09:22:05,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742150_1326 (size=12207) 2024-11-28T09:22:05,636 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4b69419dee204a189c25e7584117aa1e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4b69419dee204a189c25e7584117aa1e 2024-11-28T09:22:05,642 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 
0ad9609950a045418498b830dd929869 into 4b69419dee204a189c25e7584117aa1e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:05,642 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:05,642 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785725392; duration=0sec 2024-11-28T09:22:05,642 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:05,642 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:05,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742151_1327 (size=12001) 2024-11-28T09:22:05,662 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d5c3bb0cb7a949dfa2cc054ac09e8493 2024-11-28T09:22:05,664 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785785663, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2cbc3d42bc2147a89a21abd7719b0e9f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f 2024-11-28T09:22:05,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785785666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785785666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,699 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785785666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,703 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f, entries=150, sequenceid=114, filesize=11.7 K 2024-11-28T09:22:05,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/93392686e147409c81a2aecbaa1e082f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f 2024-11-28T09:22:05,708 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f, entries=150, sequenceid=114, filesize=11.7 K 2024-11-28T09:22:05,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d5c3bb0cb7a949dfa2cc054ac09e8493 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493 2024-11-28T09:22:05,714 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493, entries=150, sequenceid=114, filesize=11.7 K 2024-11-28T09:22:05,715 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=87.22 KB/89310 for 0ad9609950a045418498b830dd929869 in 318ms, sequenceid=114, compaction requested=false 2024-11-28T09:22:05,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:05,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:05,715 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-28T09:22:05,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-28T09:22:05,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-28T09:22:05,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4130 sec 2024-11-28T09:22:05,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.4220 sec 2024-11-28T09:22:05,750 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:05,750 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:05,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:05,769 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/424d140980b04ff191b26863e6f5df87 is 50, key is test_row_0/A:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:05,801 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742152_1328 (size=14441) 2024-11-28T09:22:05,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/424d140980b04ff191b26863e6f5df87 2024-11-28T09:22:05,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785785831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,836 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/154260b9c72a4e8196feff8f36077a62 is 50, key is test_row_0/B:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:05,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742153_1329 (size=12101) 2024-11-28T09:22:05,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785785873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785785873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,889 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/16a9bfcdd6124e1980c7a6a70a6b1fc0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/16a9bfcdd6124e1980c7a6a70a6b1fc0 2024-11-28T09:22:05,895 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 16a9bfcdd6124e1980c7a6a70a6b1fc0(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:05,895 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:05,895 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785725392; duration=0sec 2024-11-28T09:22:05,895 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:05,895 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:05,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785785903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785785904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:05,941 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:05,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785785940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,145 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785786144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,177 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785786176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,177 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785786176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,209 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785786207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,211 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785786208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,261 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/154260b9c72a4e8196feff8f36077a62 2024-11-28T09:22:06,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d7ebf9da0c8e41e4a55381822bf2f984 is 50, key is test_row_0/C:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:06,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742154_1330 (size=12101) 2024-11-28T09:22:06,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d7ebf9da0c8e41e4a55381822bf2f984 2024-11-28T09:22:06,308 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/424d140980b04ff191b26863e6f5df87 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87 2024-11-28T09:22:06,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87, entries=200, sequenceid=133, filesize=14.1 K 2024-11-28T09:22:06,313 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/154260b9c72a4e8196feff8f36077a62 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62 2024-11-28T09:22:06,319 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62, entries=150, sequenceid=133, filesize=11.8 K 2024-11-28T09:22:06,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/d7ebf9da0c8e41e4a55381822bf2f984 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984 2024-11-28T09:22:06,329 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984, entries=150, sequenceid=133, filesize=11.8 K 2024-11-28T09:22:06,330 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 0ad9609950a045418498b830dd929869 in 581ms, sequenceid=133, compaction requested=true 2024-11-28T09:22:06,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:06,330 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:06,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:06,332 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38649 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:06,332 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:06,332 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
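The repeated "RegionTooBusyException: Over memstore limit=512.0 K" warnings above are the region server rejecting puts while the region's memstore is over its blocking threshold; once the flush recorded here finishes, writes can be accepted again. Below is a minimal client-side sketch of retrying such a rejected put with backoff, assuming the standard HBase Java client (the table, row, and column names mirror the test rows seen in this log; the backoff values are illustrative, not taken from the test):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                    // illustrative starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                  // rejected while the memstore is over its limit
                    break;
                } catch (RegionTooBusyException e) {
                    // Region is flushing and above the blocking memstore size; wait and retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                  // exponential backoff
                }
            }
        }
    }
}
```

Depending on client retry settings, a single Table.put may surface this condition as a RetriesExhaustedException after the client's own internal retries rather than as a bare RegionTooBusyException; the loop above only illustrates the backoff idea.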
2024-11-28T09:22:06,332 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/16a9bfcdd6124e1980c7a6a70a6b1fc0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.7 K 2024-11-28T09:22:06,332 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:06,332 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:06,333 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 16a9bfcdd6124e1980c7a6a70a6b1fc0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:06,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:06,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:06,333 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2cbc3d42bc2147a89a21abd7719b0e9f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732785724390 2024-11-28T09:22:06,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:06,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:06,334 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:06,334 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:06,334 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
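The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from ExploringCompactionPolicy picking a candidate window of store files that are close to each other in size. The following is a simplified illustration of that size-ratio test, not the actual HBase implementation; the 1.2 value is the usual default of hbase.hstore.compaction.ratio and is assumed here, and the file sizes are approximations of the three A-store files selected above (only 14441 bytes is exact from the block report):

```java
import java.util.List;

public class CompactionRatioCheck {
    /**
     * Simplified "files in ratio" test: every file in the candidate window must be
     * no larger than ratio times the combined size of the other files in the window.
     */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        if (fileSizes.size() < 2) {
            return true;
        }
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the three A-store files selected above (~11.9 K, ~11.7 K, 14.1 K).
        List<Long> window = List.of(12_186L, 11_981L, 14_441L);
        System.out.println(filesInRatio(window, 1.2));  // true: the files are similar in size
    }
}
```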
2024-11-28T09:22:06,334 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/1bf26449135f43eb82eed2d2f79adfba, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.5 K 2024-11-28T09:22:06,334 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 424d140980b04ff191b26863e6f5df87, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725544 2024-11-28T09:22:06,334 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bf26449135f43eb82eed2d2f79adfba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:06,335 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 93392686e147409c81a2aecbaa1e082f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732785724390 2024-11-28T09:22:06,336 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 154260b9c72a4e8196feff8f36077a62, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725551 2024-11-28T09:22:06,366 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#276 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:06,367 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/5a4a3b223a1044adbe744dcadd0f2370 is 50, key is test_row_0/A:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:06,370 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#277 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:06,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/7b003e8864994048bddf11fd2bb0196a is 50, key is test_row_0/B:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:06,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742155_1331 (size=12409) 2024-11-28T09:22:06,396 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/5a4a3b223a1044adbe744dcadd0f2370 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5a4a3b223a1044adbe744dcadd0f2370 2024-11-28T09:22:06,402 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 5a4a3b223a1044adbe744dcadd0f2370(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:06,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:06,402 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785726330; duration=0sec 2024-11-28T09:22:06,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:06,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:06,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:06,403 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36309 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:06,403 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:06,403 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
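The PressureAwareThroughputController entries above report that these small compactions ("average throughput is 3.28 MB/second, slept 0 time(s)") stayed far below the 50.00 MB/second limit, so no throttling sleep was needed. As a rough illustration of the idea, a byte-rate limiter only sleeps when the bytes written get ahead of what the configured rate allows; this is a simplified model for illustration, not HBase's controller, and the class and method names are invented:

```java
public class SimpleRateLimiter {
    private final double bytesPerSecond;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    public SimpleRateLimiter(double bytesPerSecond) {
        this.bytesPerSecond = bytesPerSecond;
    }

    /** Record that 'len' bytes were written; sleep if we are ahead of the allowed rate. */
    public void control(long len) throws InterruptedException {
        bytesInWindow += len;
        double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
        double allowedBytes = elapsedSec * bytesPerSecond;
        if (bytesInWindow > allowedBytes) {
            long sleepMs = (long) (((bytesInWindow - allowedBytes) / bytesPerSecond) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);   // corresponds to the "slept N time(s)" count, 0 in this log
            }
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // ~37.7 K of compaction output against a 50 MB/s budget: far under the limit,
        // so no sleep occurs, matching the "slept 0 time(s)" entries above.
        SimpleRateLimiter limiter = new SimpleRateLimiter(50 * 1024 * 1024);
        limiter.control(38_649);
    }
}
```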
2024-11-28T09:22:06,404 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4b69419dee204a189c25e7584117aa1e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=35.5 K 2024-11-28T09:22:06,404 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b69419dee204a189c25e7584117aa1e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=90, earliestPutTs=1732785724001 2024-11-28T09:22:06,405 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d5c3bb0cb7a949dfa2cc054ac09e8493, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732785724390 2024-11-28T09:22:06,405 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d7ebf9da0c8e41e4a55381822bf2f984, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725551 2024-11-28T09:22:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-28T09:22:06,407 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-28T09:22:06,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742156_1332 (size=12409) 2024-11-28T09:22:06,412 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-28T09:22:06,414 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:06,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:06,415 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:06,415 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:06,426 INFO 
[RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#278 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:06,427 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/84ef6f3bb14245f6932191c3c9594901 is 50, key is test_row_0/C:col10/1732785725748/Put/seqid=0 2024-11-28T09:22:06,436 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/7b003e8864994048bddf11fd2bb0196a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/7b003e8864994048bddf11fd2bb0196a 2024-11-28T09:22:06,442 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 7b003e8864994048bddf11fd2bb0196a(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:06,442 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:06,442 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785726332; duration=0sec 2024-11-28T09:22:06,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:06,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:06,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:06,450 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T09:22:06,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:06,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:06,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:06,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:06,451 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:06,451 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:06,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742157_1333 (size=12409) 2024-11-28T09:22:06,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/78112ce154f040fd9e3e1f443f76e08d is 50, key is test_row_0/A:col10/1732785725830/Put/seqid=0 2024-11-28T09:22:06,461 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/84ef6f3bb14245f6932191c3c9594901 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/84ef6f3bb14245f6932191c3c9594901 2024-11-28T09:22:06,480 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into 84ef6f3bb14245f6932191c3c9594901(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:06,480 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:06,481 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785726333; duration=0sec 2024-11-28T09:22:06,481 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:06,481 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:06,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742158_1334 (size=14541) 2024-11-28T09:22:06,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/78112ce154f040fd9e3e1f443f76e08d 2024-11-28T09:22:06,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:06,520 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785786518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,525 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/552729a8a77d420da9d4e941165be690 is 50, key is test_row_0/B:col10/1732785725830/Put/seqid=0 2024-11-28T09:22:06,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742159_1335 (size=12151) 2024-11-28T09:22:06,563 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/552729a8a77d420da9d4e941165be690 2024-11-28T09:22:06,566 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,566 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T09:22:06,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:06,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:06,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
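The pid=78 FlushRegionCallable above finds the region "as already flushing" (the MemStoreFlusher is mid-flush at this moment) and therefore cannot complete, which is what produces the "Unable to complete flush" error reported back to the master in the entries that follow. For reference, the table-level flush that drives these procedures can be requested from a client roughly as sketched below, using the standard Admin API; configuration and table name are as in this test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Triggers a FlushTableProcedure on the master, which dispatches
            // FlushRegionProcedure / FlushRegionCallable work to each region server,
            // as seen for pid=75/76 and pid=77/78 in this log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}
```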
2024-11-28T09:22:06,567 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,586 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/7593e57fe5824f70a775212d804a1273 is 50, key is test_row_0/C:col10/1732785725830/Put/seqid=0 2024-11-28T09:22:06,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742160_1336 (size=12151) 2024-11-28T09:22:06,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785786622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785786680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785786681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785786710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,714 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785786713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:06,720 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T09:22:06,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:06,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:06,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:06,722 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:06,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785786824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,874 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:06,874 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T09:22:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:06,875 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:06,875 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:06,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:07,003 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/7593e57fe5824f70a775212d804a1273 2024-11-28T09:22:07,009 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/78112ce154f040fd9e3e1f443f76e08d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d 2024-11-28T09:22:07,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d, entries=200, sequenceid=156, filesize=14.2 K 2024-11-28T09:22:07,014 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/552729a8a77d420da9d4e941165be690 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690 2024-11-28T09:22:07,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:07,019 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690, entries=150, sequenceid=156, filesize=11.9 K 2024-11-28T09:22:07,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/7593e57fe5824f70a775212d804a1273 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273 2024-11-28T09:22:07,027 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273, entries=150, sequenceid=156, filesize=11.9 K 2024-11-28T09:22:07,028 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0ad9609950a045418498b830dd929869 in 578ms, sequenceid=156, compaction requested=false 2024-11-28T09:22:07,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
0ad9609950a045418498b830dd929869: 2024-11-28T09:22:07,032 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:07,033 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:07,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/93ad89726bcb4300a7123ea0b973b7ac is 50, key is test_row_0/A:col10/1732785726517/Put/seqid=0 2024-11-28T09:22:07,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742161_1337 (size=12151) 2024-11-28T09:22:07,098 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/93ad89726bcb4300a7123ea0b973b7ac 2024-11-28T09:22:07,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/305c39fba3be447891c467540e7d22d8 is 50, key is test_row_0/B:col10/1732785726517/Put/seqid=0 2024-11-28T09:22:07,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:07,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:07,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742162_1338 (size=12151) 2024-11-28T09:22:07,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785787200, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,306 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785787304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,512 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785787510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:07,551 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/305c39fba3be447891c467540e7d22d8 2024-11-28T09:22:07,572 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/714de24ef91f49bfbee10817c9e75f2e is 50, key is test_row_0/C:col10/1732785726517/Put/seqid=0 2024-11-28T09:22:07,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742163_1339 (size=12151) 2024-11-28T09:22:07,594 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/714de24ef91f49bfbee10817c9e75f2e 2024-11-28T09:22:07,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/93ad89726bcb4300a7123ea0b973b7ac as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac 2024-11-28T09:22:07,613 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac, entries=150, sequenceid=173, filesize=11.9 K 2024-11-28T09:22:07,615 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/305c39fba3be447891c467540e7d22d8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8 2024-11-28T09:22:07,626 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8, entries=150, sequenceid=173, filesize=11.9 K 2024-11-28T09:22:07,627 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/714de24ef91f49bfbee10817c9e75f2e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e 2024-11-28T09:22:07,631 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e, entries=150, sequenceid=173, filesize=11.9 K 2024-11-28T09:22:07,632 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0ad9609950a045418498b830dd929869 in 599ms, sequenceid=173, compaction requested=true 2024-11-28T09:22:07,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:07,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
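The sequence above shows the remote flush procedure (pid=78) being re-dispatched by the master each time the region server reports "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 still holds the flush; once the callable's own flush commits the three store files at sequenceid=173, pid=78 reports success and the parent FlushTableProcedure (pid=77) finishes in the entries that follow. In this build a table flush request runs as such a FlushTableProcedure, and one way to request it is through the Admin API; the snippet below is a hypothetical trigger only (connection and configuration are assumed, the table name is taken from the log), not the code this test actually uses:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush the table; in this build that executes as a
                // FlushTableProcedure (pid=77 above) which dispatches per-region flush
                // procedures (like pid=78) to the region servers.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }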
2024-11-28T09:22:07,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-28T09:22:07,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-28T09:22:07,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-28T09:22:07,635 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2190 sec 2024-11-28T09:22:07,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.2230 sec 2024-11-28T09:22:07,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:07,688 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:07,688 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:07,694 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/36d116bb52114d908999194b9cf3dea1 is 50, key is test_row_0/A:col10/1732785727686/Put/seqid=0 2024-11-28T09:22:07,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785787715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,719 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785787717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785787718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,721 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785787718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742164_1340 (size=14541) 2024-11-28T09:22:07,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785787813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,820 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785787820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:07,824 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:07,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785787822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785788022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785788026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,130 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/36d116bb52114d908999194b9cf3dea1 2024-11-28T09:22:08,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d121da5913d44600ab5e6e8ce3b734d8 is 50, key is test_row_0/B:col10/1732785727686/Put/seqid=0 2024-11-28T09:22:08,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742165_1341 (size=12151) 2024-11-28T09:22:08,152 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d121da5913d44600ab5e6e8ce3b734d8 2024-11-28T09:22:08,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/83ebbd03e6684d61816ab21ec061c413 is 50, key is test_row_0/C:col10/1732785727686/Put/seqid=0 2024-11-28T09:22:08,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742166_1342 (size=12151) 2024-11-28T09:22:08,182 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=194 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/83ebbd03e6684d61816ab21ec061c413 2024-11-28T09:22:08,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/36d116bb52114d908999194b9cf3dea1 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1 2024-11-28T09:22:08,194 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1, entries=200, sequenceid=194, filesize=14.2 K 2024-11-28T09:22:08,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d121da5913d44600ab5e6e8ce3b734d8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8 2024-11-28T09:22:08,211 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8, entries=150, sequenceid=194, filesize=11.9 K 2024-11-28T09:22:08,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/83ebbd03e6684d61816ab21ec061c413 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413 2024-11-28T09:22:08,218 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413, entries=150, sequenceid=194, filesize=11.9 K 2024-11-28T09:22:08,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 0ad9609950a045418498b830dd929869 in 531ms, sequenceid=194, compaction requested=true 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:08,219 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:08,219 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:08,219 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:08,220 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 53642 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:08,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:08,221 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:08,221 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,221 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:08,221 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5a4a3b223a1044adbe744dcadd0f2370, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=52.4 K 2024-11-28T09:22:08,221 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
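
Aside on the repeated RegionTooBusyException warnings above: HRegion.checkResources() rejects a Put once the region's memstore passes its blocking size (logged here as "Over memstore limit=512.0 K", presumably because this test deliberately runs with very small memstore settings). The blocking size is normally derived from the flush size and a block multiplier. A minimal sketch of that arithmetic, using the real configuration keys but assumed defaults, not anything taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical helper: computes the memstore size at which a region starts
// rejecting writes with RegionTooBusyException. The key names are real HBase
// configuration properties; the fallback defaults are assumptions for this sketch.
public final class MemStoreBlockingLimit {
  public static long blockingLimitBytes(Configuration conf) {
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    // Writes are blocked once the region's memstore exceeds flushSize * multiplier.
    return flushSize * multiplier;
  }

  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    System.out.println("Blocking limit: " + blockingLimitBytes(conf) + " bytes");
  }
}

With a limit of only 512 K, ordinary Put traffic trips the check almost immediately, which is why the same stack trace repeats from every RPC handler until the flush below drains the memstore.
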
2024-11-28T09:22:08,221 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/7b003e8864994048bddf11fd2bb0196a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=47.7 K 2024-11-28T09:22:08,221 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a4a3b223a1044adbe744dcadd0f2370, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725551 2024-11-28T09:22:08,221 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b003e8864994048bddf11fd2bb0196a, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725551 2024-11-28T09:22:08,222 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 78112ce154f040fd9e3e1f443f76e08d, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732785725812 2024-11-28T09:22:08,222 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 552729a8a77d420da9d4e941165be690, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732785725825 2024-11-28T09:22:08,222 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93ad89726bcb4300a7123ea0b973b7ac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785726488 2024-11-28T09:22:08,222 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 305c39fba3be447891c467540e7d22d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785726488 2024-11-28T09:22:08,222 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 36d116bb52114d908999194b9cf3dea1, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732785727180 2024-11-28T09:22:08,223 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d121da5913d44600ab5e6e8ce3b734d8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732785727194 2024-11-28T09:22:08,246 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#288 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:08,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/55d60a3ae77e4356bb77567623fcd4f8 is 50, key is test_row_0/A:col10/1732785727686/Put/seqid=0 2024-11-28T09:22:08,255 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#289 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:08,255 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/19625573a5f8440d8608087983437e86 is 50, key is test_row_0/B:col10/1732785727686/Put/seqid=0 2024-11-28T09:22:08,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742167_1343 (size=12595) 2024-11-28T09:22:08,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742168_1344 (size=12595) 2024-11-28T09:22:08,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:08,321 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:08,321 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,326 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/abdbdc02e8004d299c06f24a2298d27f is 50, key is test_row_0/A:col10/1732785727704/Put/seqid=0 2024-11-28T09:22:08,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742169_1345 (size=12151) 2024-11-28T09:22:08,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/abdbdc02e8004d299c06f24a2298d27f 2024-11-28T09:22:08,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785788353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,358 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785788355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785788356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5f18b2b3dde64ad6b95917779ce4502d is 50, key is test_row_0/B:col10/1732785727704/Put/seqid=0 2024-11-28T09:22:08,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742170_1346 (size=12151) 2024-11-28T09:22:08,375 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5f18b2b3dde64ad6b95917779ce4502d 2024-11-28T09:22:08,383 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cdd1bc576a014d2293c484fa70f97649 is 50, key is test_row_0/C:col10/1732785727704/Put/seqid=0 2024-11-28T09:22:08,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742171_1347 (size=12151) 2024-11-28T09:22:08,392 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cdd1bc576a014d2293c484fa70f97649 2024-11-28T09:22:08,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/abdbdc02e8004d299c06f24a2298d27f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f 2024-11-28T09:22:08,400 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f, entries=150, sequenceid=210, filesize=11.9 K 
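
The writers that received RegionTooBusyException above are expected to back off and retry; the stock HBase client already treats this exception as retryable internally (it may also surface wrapped in a retries-exhausted exception), so the sketch below is only an illustration of an explicit backoff loop for a caller that wants to cap its own retries. Table, family and row names mirror the log but are otherwise arbitrary:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: retry a Put with exponential backoff when the region reports
// it is over its memstore limit.
public final class BackoffPut {
  public static void putWithBackoff(Table table, Put put, int maxAttempts)
      throws IOException, InterruptedException {
    long sleepMs = 100;
    for (int attempt = 1; ; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException e) {
        if (attempt >= maxAttempts) {
          throw e; // give up after maxAttempts
        }
        Thread.sleep(sleepMs);
        sleepMs = Math.min(sleepMs * 2, 5_000); // exponential backoff, capped at 5 s
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      putWithBackoff(table, put, 5);
    }
  }
}
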
2024-11-28T09:22:08,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5f18b2b3dde64ad6b95917779ce4502d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d 2024-11-28T09:22:08,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d, entries=150, sequenceid=210, filesize=11.9 K 2024-11-28T09:22:08,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cdd1bc576a014d2293c484fa70f97649 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649 2024-11-28T09:22:08,413 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649, entries=150, sequenceid=210, filesize=11.9 K 2024-11-28T09:22:08,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0ad9609950a045418498b830dd929869 in 93ms, sequenceid=210, compaction requested=true 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:08,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-28T09:22:08,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:08,461 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:08,462 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:08,481 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e87a4dff73df41fba87662a447a3cff3 is 50, key is test_row_0/A:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:08,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742172_1348 (size=16931) 2024-11-28T09:22:08,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-28T09:22:08,518 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-28T09:22:08,520 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:08,520 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785788514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-28T09:22:08,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785788515, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:08,522 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:08,523 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:08,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:08,525 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785788520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:08,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785788622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,624 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785788622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,628 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785788626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,674 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,675 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:08,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:08,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
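
The FlushRegionCallable failure above ("Unable to complete flush ... as already flushing") happens because the MemStoreFlusher is still writing the previous snapshot when the master-driven FlushTableProcedure (pid=80) reaches the region server; the region server reports the IOException back to the master, as the following entries show. For a caller that triggers flushes directly, a tolerant wrapper might look like the sketch below. Admin.flush(TableName) is the real client API; the retry count and sleep are assumptions:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

// Sketch: request a table flush and tolerate transient "already flushing"
// failures by retrying a few times before giving up.
public final class FlushWithRetry {
  public static void flushTable(Admin admin, TableName table)
      throws IOException, InterruptedException {
    IOException last = null;
    for (int attempt = 0; attempt < 3; attempt++) {
      try {
        admin.flush(table); // real Admin API; returns once the flush request completes
        return;
      } catch (IOException e) {
        last = e;           // e.g. the region was mid-flush; wait and try again
        Thread.sleep(1_000);
      }
    }
    throw last;
  }
}
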
2024-11-28T09:22:08,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,676 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/55d60a3ae77e4356bb77567623fcd4f8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/55d60a3ae77e4356bb77567623fcd4f8 2024-11-28T09:22:08,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,684 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 55d60a3ae77e4356bb77567623fcd4f8(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
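
At this point the short-compaction thread has merged the four A-family files selected earlier ("4 files of size 53642 ... 3 in ratio") into a single 12.3 K HFile. The "in ratio" test that ExploringCompactionPolicy applies is, roughly, that no file in the chosen window is larger than the sum of the other files times a ratio (1.2 by default, hbase.hstore.compaction.ratio). The standalone sketch below illustrates only that check and is not the actual HBase implementation, which carries extra bookkeeping (off-peak ratios, size caps, permutation search):

import java.util.List;

// Simplified illustration of the "in ratio" window test used when picking
// store files to compact: each file must be <= ratio * (sum of the others).
public final class InRatioCheck {
  public static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the window; skip this candidate
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical per-file sizes chosen to total the 53642 bytes logged above.
    List<Long> window = List.of(12400L, 14541L, 12151L, 14550L);
    System.out.println(filesInRatio(window, 1.2)); // expected: true
  }
}
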
2024-11-28T09:22:08,684 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:08,684 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=12, startTime=1732785728219; duration=0sec 2024-11-28T09:22:08,684 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-28T09:22:08,684 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:08,684 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:08,684 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-28T09:22:08,687 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61013 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-28T09:22:08,687 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:08,687 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:08,687 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/84ef6f3bb14245f6932191c3c9594901, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=59.6 K 2024-11-28T09:22:08,688 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84ef6f3bb14245f6932191c3c9594901, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1732785725551 2024-11-28T09:22:08,688 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7593e57fe5824f70a775212d804a1273, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1732785725825 2024-11-28T09:22:08,689 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 714de24ef91f49bfbee10817c9e75f2e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785726488 2024-11-28T09:22:08,689 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 83ebbd03e6684d61816ab21ec061c413, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732785727194 2024-11-28T09:22:08,689 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cdd1bc576a014d2293c484fa70f97649, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785727704 2024-11-28T09:22:08,706 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/19625573a5f8440d8608087983437e86 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/19625573a5f8440d8608087983437e86 2024-11-28T09:22:08,712 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#294 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:08,712 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/be3bc9641ea5492289b7e95b777a988f is 50, key is test_row_0/C:col10/1732785727704/Put/seqid=0 2024-11-28T09:22:08,712 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 19625573a5f8440d8608087983437e86(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:08,713 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=12, startTime=1732785728219; duration=0sec 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 5 compacting, 0 eligible, 16 blocking 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
because compaction request was cancelled 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:08,713 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T09:22:08,715 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:08,715 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:08,715 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. because compaction request was cancelled 2024-11-28T09:22:08,715 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:08,715 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T09:22:08,716 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:08,716 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:08,716 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. because compaction request was cancelled 2024-11-28T09:22:08,716 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:08,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742173_1349 (size=12629) 2024-11-28T09:22:08,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:08,828 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785788825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785788826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,835 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:08,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785788831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e87a4dff73df41fba87662a447a3cff3 2024-11-28T09:22:08,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d9a41a27c61c43649373df255bfa676d is 50, key is test_row_0/B:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:08,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742174_1350 (size=12151) 2024-11-28T09:22:08,944 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d9a41a27c61c43649373df255bfa676d 2024-11-28T09:22:08,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/0b7ac173f3754f5d8765b742ce4834d7 is 50, key is test_row_0/C:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:08,982 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:08,982 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:08,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
as already flushing 2024-11-28T09:22:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:08,983 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:08,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742175_1351 (size=12151) 2024-11-28T09:22:09,030 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/0b7ac173f3754f5d8765b742ce4834d7 2024-11-28T09:22:09,040 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e87a4dff73df41fba87662a447a3cff3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3 2024-11-28T09:22:09,052 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3, entries=250, sequenceid=232, filesize=16.5 K 2024-11-28T09:22:09,053 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d9a41a27c61c43649373df255bfa676d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d 
2024-11-28T09:22:09,057 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d, entries=150, sequenceid=232, filesize=11.9 K 2024-11-28T09:22:09,058 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/0b7ac173f3754f5d8765b742ce4834d7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7 2024-11-28T09:22:09,062 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7, entries=150, sequenceid=232, filesize=11.9 K 2024-11-28T09:22:09,063 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 0ad9609950a045418498b830dd929869 in 602ms, sequenceid=232, compaction requested=true 2024-11-28T09:22:09,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:09,064 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:09,064 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:09,065 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41677 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:09,065 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:09,065 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,065 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/55d60a3ae77e4356bb77567623fcd4f8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=40.7 K 2024-11-28T09:22:09,065 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 55d60a3ae77e4356bb77567623fcd4f8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732785727194 2024-11-28T09:22:09,066 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting abdbdc02e8004d299c06f24a2298d27f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785727704 2024-11-28T09:22:09,067 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e87a4dff73df41fba87662a447a3cff3, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732785728351 2024-11-28T09:22:09,092 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#297 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:09,093 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/b7af2c0b90b94516bebc9523c421f1bb is 50, key is test_row_0/A:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:09,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742176_1352 (size=12697) 2024-11-28T09:22:09,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:09,127 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/b7af2c0b90b94516bebc9523c421f1bb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/b7af2c0b90b94516bebc9523c421f1bb 2024-11-28T09:22:09,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/be3bc9641ea5492289b7e95b777a988f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/be3bc9641ea5492289b7e95b777a988f 2024-11-28T09:22:09,136 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:22:09,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:09,137 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into b7af2c0b90b94516bebc9523c421f1bb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:09,137 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:09,137 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785729063; duration=0sec 2024-11-28T09:22:09,137 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:09,137 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:09,138 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 5 compacting, 1 eligible, 16 blocking 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:09,138 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:09,138 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:09,138 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:09,138 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
because compaction request was cancelled 2024-11-28T09:22:09,139 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:09,139 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:09,141 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:09,141 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:09,141 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,141 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/19625573a5f8440d8608087983437e86, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.0 K 2024-11-28T09:22:09,142 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 19625573a5f8440d8608087983437e86, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=194, earliestPutTs=1732785727194 2024-11-28T09:22:09,142 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5f18b2b3dde64ad6b95917779ce4502d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785727704 2024-11-28T09:22:09,143 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d9a41a27c61c43649373df255bfa676d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732785728355 2024-11-28T09:22:09,144 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into be3bc9641ea5492289b7e95b777a988f(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:09,145 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:09,145 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=11, startTime=1732785728415; duration=0sec 2024-11-28T09:22:09,145 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:09,145 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:09,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/07533d667578455da841b163e49a5098 is 50, key is test_row_1/A:col10/1732785728518/Put/seqid=0 2024-11-28T09:22:09,153 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#299 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:09,154 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/745ec3c8afae4a0babf52ea19d1b2ca4 is 50, key is test_row_0/B:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:09,165 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,165 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:09,166 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,167 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742178_1354 (size=12147) 2024-11-28T09:22:09,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785789197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,200 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785789197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,200 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785789198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742177_1353 (size=12697) 2024-11-28T09:22:09,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785789302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,305 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785789302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,306 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785789303, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,318 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,319 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,319 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,319 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,472 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,472 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,473 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,473 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785789506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,513 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785789511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,521 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785789520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,597 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/07533d667578455da841b163e49a5098 2024-11-28T09:22:09,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/6fb987abdc904cdcb5ecf67ec5ce4268 is 50, key is test_row_1/B:col10/1732785728518/Put/seqid=0 2024-11-28T09:22:09,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:09,630 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/745ec3c8afae4a0babf52ea19d1b2ca4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/745ec3c8afae4a0babf52ea19d1b2ca4 2024-11-28T09:22:09,631 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:09,632 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,632 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742179_1355 (size=9757) 2024-11-28T09:22:09,637 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/6fb987abdc904cdcb5ecf67ec5ce4268 2024-11-28T09:22:09,638 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 745ec3c8afae4a0babf52ea19d1b2ca4(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:09,638 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:09,638 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785729064; duration=0sec 2024-11-28T09:22:09,638 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:09,638 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:09,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/c3e1f18cf2a844d48e5c5eab36e6471e is 50, key is test_row_1/C:col10/1732785728518/Put/seqid=0 2024-11-28T09:22:09,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742180_1356 (size=9757) 2024-11-28T09:22:09,727 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785789726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,728 DEBUG [Thread-1368 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4169 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., hostname=363d8d38a970,33819,1732785660637, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:09,737 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785789737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,738 DEBUG [Thread-1366 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., hostname=363d8d38a970,33819,1732785660637, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:09,784 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,784 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,785 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,785 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:09,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785789813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785789816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,823 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:09,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785789822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,937 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:09,938 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:09,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:09,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:09,938 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:10,072 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/c3e1f18cf2a844d48e5c5eab36e6471e 2024-11-28T09:22:10,078 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/07533d667578455da841b163e49a5098 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098 2024-11-28T09:22:10,083 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098, entries=150, sequenceid=251, filesize=11.9 K 2024-11-28T09:22:10,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/6fb987abdc904cdcb5ecf67ec5ce4268 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268 2024-11-28T09:22:10,088 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268, entries=100, sequenceid=251, filesize=9.5 K 2024-11-28T09:22:10,089 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/c3e1f18cf2a844d48e5c5eab36e6471e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e 2024-11-28T09:22:10,090 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,090 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:10,091 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:10,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:10,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:10,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:10,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e, entries=100, sequenceid=251, filesize=9.5 K 2024-11-28T09:22:10,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0ad9609950a045418498b830dd929869 in 961ms, sequenceid=251, compaction requested=true 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:10,097 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:10,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:10,097 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
because compaction request was cancelled 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. because compaction request was cancelled 2024-11-28T09:22:10,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:10,099 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34537 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:10,099 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:10,099 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:10,099 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/be3bc9641ea5492289b7e95b777a988f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=33.7 K 2024-11-28T09:22:10,100 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting be3bc9641ea5492289b7e95b777a988f, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785727704 2024-11-28T09:22:10,100 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b7ac173f3754f5d8765b742ce4834d7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732785728355 2024-11-28T09:22:10,101 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c3e1f18cf2a844d48e5c5eab36e6471e, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785728518 2024-11-28T09:22:10,110 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#302 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:10,110 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/b1f2fcb49b0d4837aaf8d5ac26a21c95 is 50, key is test_row_0/C:col10/1732785728460/Put/seqid=0 2024-11-28T09:22:10,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742181_1357 (size=12731) 2024-11-28T09:22:10,135 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/b1f2fcb49b0d4837aaf8d5ac26a21c95 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b1f2fcb49b0d4837aaf8d5ac26a21c95 2024-11-28T09:22:10,141 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into b1f2fcb49b0d4837aaf8d5ac26a21c95(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:10,142 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:10,142 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785730097; duration=0sec 2024-11-28T09:22:10,142 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:10,142 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:10,243 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-28T09:22:10,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:10,245 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:10,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/ba1ee478c2544492a7ebcaa6f3add6c7 is 50, key is test_row_0/A:col10/1732785729185/Put/seqid=0 2024-11-28T09:22:10,270 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742182_1358 (size=12301) 2024-11-28T09:22:10,271 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/ba1ee478c2544492a7ebcaa6f3add6c7 2024-11-28T09:22:10,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/b12a95c4f8354e4f90451ec326f3da70 is 50, key is test_row_0/B:col10/1732785729185/Put/seqid=0 2024-11-28T09:22:10,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742183_1359 (size=12301) 2024-11-28T09:22:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:10,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:10,334 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785790332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,335 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785790333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785790339, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,390 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 2024-11-28T09:22:10,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-28T09:22:10,437 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785790436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,441 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785790439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,443 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785790442, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:10,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785790640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785790642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,647 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785790645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,688 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/b12a95c4f8354e4f90451ec326f3da70 2024-11-28T09:22:10,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/63c3ed31bed24ecea50cc43b7600f623 is 50, key is test_row_0/C:col10/1732785729185/Put/seqid=0 2024-11-28T09:22:10,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742184_1360 (size=12301) 2024-11-28T09:22:10,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785790944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,949 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785790947, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:10,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:10,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785790950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,123 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/63c3ed31bed24ecea50cc43b7600f623 2024-11-28T09:22:11,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/ba1ee478c2544492a7ebcaa6f3add6c7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7 2024-11-28T09:22:11,134 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7, entries=150, sequenceid=274, filesize=12.0 K 2024-11-28T09:22:11,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/b12a95c4f8354e4f90451ec326f3da70 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70 2024-11-28T09:22:11,139 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70, entries=150, sequenceid=274, filesize=12.0 K 2024-11-28T09:22:11,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/63c3ed31bed24ecea50cc43b7600f623 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623 2024-11-28T09:22:11,143 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623, entries=150, sequenceid=274, filesize=12.0 K 2024-11-28T09:22:11,144 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=93.93 KB/96180 for 0ad9609950a045418498b830dd929869 in 899ms, sequenceid=274, compaction requested=true 2024-11-28T09:22:11,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:11,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:11,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-28T09:22:11,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-28T09:22:11,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-28T09:22:11,147 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.6220 sec 2024-11-28T09:22:11,149 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 2.6270 sec 2024-11-28T09:22:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:11,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:11,455 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:11,461 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/d19c5b1806e14641b40584f192ede476 is 50, key is test_row_0/A:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:11,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742185_1361 (size=14741) 2024-11-28T09:22:11,467 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/d19c5b1806e14641b40584f192ede476 2024-11-28T09:22:11,477 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/cbaf09dcf1eb4cc7906c142bb84a8dad is 50, key is test_row_0/B:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:11,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore 
size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785791481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,485 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785791482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785791482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742186_1362 (size=12301) 2024-11-28T09:22:11,587 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785791587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785791587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,589 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785791587, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,792 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785791790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785791790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,794 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:11,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785791791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:11,925 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/cbaf09dcf1eb4cc7906c142bb84a8dad 2024-11-28T09:22:11,934 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/06c6f7f8412048deb639cf42b8df5861 is 50, key is test_row_0/C:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:11,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742187_1363 (size=12301) 2024-11-28T09:22:11,990 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=293 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/06c6f7f8412048deb639cf42b8df5861 2024-11-28T09:22:11,997 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/d19c5b1806e14641b40584f192ede476 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476 2024-11-28T09:22:12,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476, entries=200, sequenceid=293, filesize=14.4 K 2024-11-28T09:22:12,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/cbaf09dcf1eb4cc7906c142bb84a8dad as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad 2024-11-28T09:22:12,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad, entries=150, sequenceid=293, filesize=12.0 K 2024-11-28T09:22:12,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/06c6f7f8412048deb639cf42b8df5861 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861 2024-11-28T09:22:12,028 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861, entries=150, sequenceid=293, filesize=12.0 K 2024-11-28T09:22:12,029 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=93.93 KB/96180 for 0ad9609950a045418498b830dd929869 in 575ms, sequenceid=293, compaction requested=true 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:12,029 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:22:12,030 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:12,030 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 
store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:12,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:12,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:12,032 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:12,032 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/b7af2c0b90b94516bebc9523c421f1bb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=50.7 K 2024-11-28T09:22:12,032 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47056 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:12,032 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:12,032 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:12,032 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/745ec3c8afae4a0babf52ea19d1b2ca4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=46.0 K 2024-11-28T09:22:12,033 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 745ec3c8afae4a0babf52ea19d1b2ca4, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732785728355 2024-11-28T09:22:12,034 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b7af2c0b90b94516bebc9523c421f1bb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732785728355 2024-11-28T09:22:12,034 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fb987abdc904cdcb5ecf67ec5ce4268, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785728518 2024-11-28T09:22:12,034 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 07533d667578455da841b163e49a5098, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785728518 2024-11-28T09:22:12,035 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b12a95c4f8354e4f90451ec326f3da70, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732785729185 2024-11-28T09:22:12,035 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba1ee478c2544492a7ebcaa6f3add6c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732785729185 2024-11-28T09:22:12,035 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cbaf09dcf1eb4cc7906c142bb84a8dad, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,035 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d19c5b1806e14641b40584f192ede476, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,052 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#309 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,052 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/a7fc82142cf9408e898d72dc7a78af84 is 50, key is test_row_0/B:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:12,075 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#310 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,075 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/734abe7c1158428991505d6d4268d530 is 50, key is test_row_0/A:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:12,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742188_1364 (size=12983) 2024-11-28T09:22:12,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742189_1365 (size=12983) 2024-11-28T09:22:12,094 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/734abe7c1158428991505d6d4268d530 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/734abe7c1158428991505d6d4268d530 2024-11-28T09:22:12,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:12,097 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=100.63 KB heapSize=264.42 KB 2024-11-28T09:22:12,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:12,097 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:12,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:12,098 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,101 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 734abe7c1158428991505d6d4268d530(size=12.7 K), total size for store is 12.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:12,101 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,101 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=12, startTime=1732785732029; duration=0sec 2024-11-28T09:22:12,101 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:12,102 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:12,102 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:12,104 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:12,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/33b4a4fe303543159423a9c80b496cc7 is 50, key is test_row_0/A:col10/1732785731480/Put/seqid=0 2024-11-28T09:22:12,104 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:12,104 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:12,104 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b1f2fcb49b0d4837aaf8d5ac26a21c95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.5 K 2024-11-28T09:22:12,104 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1f2fcb49b0d4837aaf8d5ac26a21c95, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732785728460 2024-11-28T09:22:12,105 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 63c3ed31bed24ecea50cc43b7600f623, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1732785729185 2024-11-28T09:22:12,106 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 06c6f7f8412048deb639cf42b8df5861, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742190_1366 (size=14741) 2024-11-28T09:22:12,111 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/33b4a4fe303543159423a9c80b496cc7 2024-11-28T09:22:12,118 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#312 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,118 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a571ec66d3a34ceb867c1d3d0b4ef012 is 50, key is test_row_0/C:col10/1732785730331/Put/seqid=0 2024-11-28T09:22:12,126 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ab6aeafb7ed746199c01f07921539f8a is 50, key is test_row_0/B:col10/1732785731480/Put/seqid=0 2024-11-28T09:22:12,128 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785792124, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785792126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785792126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742191_1367 (size=12983) 2024-11-28T09:22:12,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742192_1368 (size=12301) 2024-11-28T09:22:12,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=311 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ab6aeafb7ed746199c01f07921539f8a 2024-11-28T09:22:12,161 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4c182da125fb4801b7d19d2d942a420b is 50, key is test_row_0/C:col10/1732785731480/Put/seqid=0 2024-11-28T09:22:12,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742193_1369 (size=12301) 2024-11-28T09:22:12,176 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=33.54 KB at sequenceid=311 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4c182da125fb4801b7d19d2d942a420b 2024-11-28T09:22:12,180 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/33b4a4fe303543159423a9c80b496cc7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7 2024-11-28T09:22:12,184 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7, entries=200, sequenceid=311, filesize=14.4 K 2024-11-28T09:22:12,185 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ab6aeafb7ed746199c01f07921539f8a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a 2024-11-28T09:22:12,189 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a, entries=150, sequenceid=311, filesize=12.0 K 2024-11-28T09:22:12,190 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4c182da125fb4801b7d19d2d942a420b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b 2024-11-28T09:22:12,195 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b, entries=150, sequenceid=311, filesize=12.0 K 2024-11-28T09:22:12,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~100.63 KB/103050, heapSize ~264.38 KB/270720, currentSize=100.63 KB/103050 for 0ad9609950a045418498b830dd929869 in 99ms, sequenceid=311, compaction requested=false 2024-11-28T09:22:12,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:12,232 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-28T09:22:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:12,233 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:12,233 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a760833c19e04816a745bcf909b85799 is 50, key is test_row_0/A:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742194_1370 (size=12301) 2024-11-28T09:22:12,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a760833c19e04816a745bcf909b85799 2024-11-28T09:22:12,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/382987eecedc4f5e8ce3de66d5771eca is 50, key is test_row_0/B:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785792251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785792252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785792252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742195_1371 (size=12301) 2024-11-28T09:22:12,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/382987eecedc4f5e8ce3de66d5771eca 2024-11-28T09:22:12,264 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1bb76c477831479fab5c030a114a205b is 50, key is test_row_0/C:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742196_1372 (size=12301) 2024-11-28T09:22:12,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785792356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785792357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785792357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/a7fc82142cf9408e898d72dc7a78af84 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/a7fc82142cf9408e898d72dc7a78af84 2024-11-28T09:22:12,491 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into a7fc82142cf9408e898d72dc7a78af84(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
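The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server's write backpressure: checkResources() rejects new puts once a region's memstore exceeds its blocking threshold, which in stock HBase is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The unusually small 512 K limit in this run presumably comes from the test harness shrinking the flush size to force frequent flushes. A minimal configuration sketch that yields a threshold of that order (the concrete numbers below are illustrative, not taken from this test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBackpressureConfig {
        // Returns a configuration whose blocking threshold works out to 512 KB,
        // the same order of magnitude as the limit reported in the log above.
        public static Configuration smallMemstoreConf() {
            Configuration conf = HBaseConfiguration.create();
            // Flush a region's memstore once it reaches 128 KB
            // (illustrative; the production default is 128 MB).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            // Block writes once the memstore passes flush.size * multiplier,
            // i.e. 128 KB * 4 = 512 KB here.
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }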
2024-11-28T09:22:12,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,491 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=12, startTime=1732785732029; duration=0sec 2024-11-28T09:22:12,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:12,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:12,557 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a571ec66d3a34ceb867c1d3d0b4ef012 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a571ec66d3a34ceb867c1d3d0b4ef012 2024-11-28T09:22:12,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785792558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,560 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785792558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785792559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,564 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into a571ec66d3a34ceb867c1d3d0b4ef012(size=12.7 K), total size for store is 24.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:12,564 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,564 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785732029; duration=0sec 2024-11-28T09:22:12,564 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:12,564 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:12,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-28T09:22:12,629 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-28T09:22:12,631 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:12,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-28T09:22:12,633 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:12,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T09:22:12,634 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:12,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:12,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1bb76c477831479fab5c030a114a205b 2024-11-28T09:22:12,675 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a760833c19e04816a745bcf909b85799 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799 2024-11-28T09:22:12,680 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799, entries=150, sequenceid=332, filesize=12.0 K 2024-11-28T09:22:12,681 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/382987eecedc4f5e8ce3de66d5771eca as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca 2024-11-28T09:22:12,687 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca, entries=150, sequenceid=332, filesize=12.0 K 2024-11-28T09:22:12,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1bb76c477831479fab5c030a114a205b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b 2024-11-28T09:22:12,693 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b, entries=150, sequenceid=332, filesize=12.0 K 2024-11-28T09:22:12,694 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0ad9609950a045418498b830dd929869 in 463ms, sequenceid=332, compaction requested=true 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:12,695 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:12,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:12,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:12,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:12,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:12,696 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
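The FlushTableProcedure entries above (procId 79 completing, procId 81 stored with a FlushRegionProcedure subprocedure) correspond to the test client asking the master to flush the whole table. A minimal sketch of the client-side call that produces such a procedure, assuming an already-open Connection (error handling elided):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;

    public class FlushTestTable {
        // Asks the master to flush every region of the table; the log above shows
        // the resulting FlushTableProcedure and its per-region subprocedure.
        static void flush(Connection connection) throws java.io.IOException {
            try (Admin admin = connection.getAdmin()) {
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }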
2024-11-28T09:22:12,696 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/734abe7c1158428991505d6d4268d530, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=39.1 K 2024-11-28T09:22:12,697 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:12,697 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:12,697 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:12,697 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 734abe7c1158428991505d6d4268d530, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,697 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/a7fc82142cf9408e898d72dc7a78af84, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.7 K 2024-11-28T09:22:12,697 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 33b4a4fe303543159423a9c80b496cc7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732785731471 2024-11-28T09:22:12,697 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a7fc82142cf9408e898d72dc7a78af84, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,698 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a760833c19e04816a745bcf909b85799, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:12,698 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
compactions.Compactor(224): Compacting ab6aeafb7ed746199c01f07921539f8a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732785731471 2024-11-28T09:22:12,699 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 382987eecedc4f5e8ce3de66d5771eca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:12,710 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#318 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,710 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#319 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,710 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/8bde5bb1551d4f2cae7f3831a5903d2f is 50, key is test_row_0/B:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,711 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/7436708ef4ed4b5d945809b2da0df232 is 50, key is test_row_0/A:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T09:22:12,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742197_1373 (size=13085) 2024-11-28T09:22:12,746 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/8bde5bb1551d4f2cae7f3831a5903d2f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8bde5bb1551d4f2cae7f3831a5903d2f 2024-11-28T09:22:12,751 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 8bde5bb1551d4f2cae7f3831a5903d2f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
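The ExploringCompactionPolicy lines above select all three eligible HFiles (roughly 12-14 K each) for a minor compaction because they are close in size. The core of that ratio-based selection is that no candidate may be much larger than the rest of the set combined. The sketch below is a deliberately simplified illustration of that check, not HBase's actual implementation:

    import java.util.List;

    public class RatioCheck {
        // Simplified ratio test used by size-based compaction selection:
        // every candidate must be no larger than ratio * (sum of the others).
        static boolean withinRatio(List<Long> fileSizes, double ratio) {
            long total = fileSizes.stream().mapToLong(Long::longValue).sum();
            for (long size : fileSizes) {
                if (size > ratio * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Three similarly sized files (bytes), as in the log: selected together.
            System.out.println(withinRatio(List.of(13_000L, 14_700L, 12_300L), 1.2)); // true
            // One dominant file breaks the ratio and would be left out.
            System.out.println(withinRatio(List.of(200_000L, 12_000L, 12_000L), 1.2)); // false
        }
    }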
2024-11-28T09:22:12,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,751 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785732695; duration=0sec 2024-11-28T09:22:12,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:12,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:12,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:12,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742198_1374 (size=13085) 2024-11-28T09:22:12,754 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:12,754 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:12,754 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:12,754 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a571ec66d3a34ceb867c1d3d0b4ef012, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.7 K 2024-11-28T09:22:12,763 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a571ec66d3a34ceb867c1d3d0b4ef012, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=293, earliestPutTs=1732785730331 2024-11-28T09:22:12,764 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c182da125fb4801b7d19d2d942a420b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=311, earliestPutTs=1732785731471 2024-11-28T09:22:12,764 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1bb76c477831479fab5c030a114a205b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:12,778 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#320 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:12,778 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/48143c8041da43508f439891189a9b60 is 50, key is test_row_0/C:col10/1732785732120/Put/seqid=0 2024-11-28T09:22:12,785 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,785 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:12,786 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:12,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742199_1375 (size=13085) 2024-11-28T09:22:12,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/3f5e7bf7aa8845d7b37200ce45870c69 is 50, key is test_row_0/A:col10/1732785732244/Put/seqid=0 2024-11-28T09:22:12,797 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/48143c8041da43508f439891189a9b60 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/48143c8041da43508f439891189a9b60 2024-11-28T09:22:12,808 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into 48143c8041da43508f439891189a9b60(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
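The RegionTooBusyException warnings that resume below are surfaced to callers as retryable failures; the standard HBase client already retries them with backoff, and depending on retry settings the exception may arrive wrapped rather than directly. Purely as an illustration, a hand-rolled backoff loop for a hypothetical standalone writer against this table could look like the following (row and family names mirror the test data; values are illustrative):

    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BackoffWriter {
        // Retries a put with exponential backoff while the region reports
        // memstore backpressure, instead of failing the write outright.
        // Assumes the RegionTooBusyException surfaces unwrapped.
        static void putWithBackoff(Table table, Put put) throws Exception {
            long sleepMs = 100;
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    return;
                } catch (RegionTooBusyException e) {
                    Thread.sleep(sleepMs); // give the flush a chance to drain the memstore
                    sleepMs *= 2;
                }
            }
            throw new java.io.IOException("region still too busy after retries");
        }

        static Put examplePut() {
            // Mirrors the shape of the rows in this test: row test_row_0,
            // family A, qualifier col10.
            return new Put(Bytes.toBytes("test_row_0"))
                .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
        }
    }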
2024-11-28T09:22:12,808 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:12,808 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785732695; duration=0sec 2024-11-28T09:22:12,808 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:12,809 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:12,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742200_1376 (size=12301) 2024-11-28T09:22:12,838 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/3f5e7bf7aa8845d7b37200ce45870c69 2024-11-28T09:22:12,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/28e0da7b97a6402d9a9abeb94443342a is 50, key is test_row_0/B:col10/1732785732244/Put/seqid=0 2024-11-28T09:22:12,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:12,864 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
as already flushing 2024-11-28T09:22:12,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742201_1377 (size=12301) 2024-11-28T09:22:12,868 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/28e0da7b97a6402d9a9abeb94443342a 2024-11-28T09:22:12,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1c3e2fa59b2c41398500398046e02170 is 50, key is test_row_0/C:col10/1732785732244/Put/seqid=0 2024-11-28T09:22:12,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742202_1378 (size=12301) 2024-11-28T09:22:12,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785792916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785792916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,919 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:12,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785792916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:12,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T09:22:13,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785793021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,021 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785793021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785793021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,160 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/7436708ef4ed4b5d945809b2da0df232 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7436708ef4ed4b5d945809b2da0df232 2024-11-28T09:22:13,175 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 7436708ef4ed4b5d945809b2da0df232(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:13,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:13,176 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785732695; duration=0sec 2024-11-28T09:22:13,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:13,176 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:13,224 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785793223, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,225 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785793224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785793225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T09:22:13,305 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=351 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1c3e2fa59b2c41398500398046e02170 2024-11-28T09:22:13,310 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/3f5e7bf7aa8845d7b37200ce45870c69 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69 2024-11-28T09:22:13,314 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69, entries=150, sequenceid=351, filesize=12.0 K 2024-11-28T09:22:13,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/28e0da7b97a6402d9a9abeb94443342a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a 2024-11-28T09:22:13,321 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a, entries=150, sequenceid=351, filesize=12.0 K 2024-11-28T09:22:13,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/1c3e2fa59b2c41398500398046e02170 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170 2024-11-28T09:22:13,335 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170, entries=150, sequenceid=351, filesize=12.0 K 2024-11-28T09:22:13,337 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for 0ad9609950a045418498b830dd929869 in 550ms, sequenceid=351, compaction requested=false 2024-11-28T09:22:13,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:13,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:13,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-28T09:22:13,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-28T09:22:13,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-28T09:22:13,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 704 msec 2024-11-28T09:22:13,342 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 710 msec 2024-11-28T09:22:13,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:13,528 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:22:13,530 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:13,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:13,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:13,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:13,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:13,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:13,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2105ced845bd4d15a1396c4d4bf44bab is 50, key is test_row_0/A:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:13,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785793544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785793546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,552 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785793552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742203_1379 (size=12301) 2024-11-28T09:22:13,576 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2105ced845bd4d15a1396c4d4bf44bab 2024-11-28T09:22:13,585 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e280f892d5084cd1a8d0b8b45e39b9e4 is 50, key is test_row_0/B:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:13,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742204_1380 (size=12301) 2024-11-28T09:22:13,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e280f892d5084cd1a8d0b8b45e39b9e4 2024-11-28T09:22:13,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cd3a9300e682471b958edf9efc4f24c4 is 50, key is test_row_0/C:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:13,650 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785793648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,652 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785793650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,656 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785793654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742205_1381 (size=12301) 2024-11-28T09:22:13,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-28T09:22:13,737 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-28T09:22:13,738 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:13,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-28T09:22:13,739 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:13,740 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:13,740 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:13,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:13,754 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45298 deadline: 1732785793752, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,755 DEBUG [Thread-1368 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8196 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., hostname=363d8d38a970,33819,1732785660637, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:13,757 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45338 deadline: 1732785793756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,758 DEBUG [Thread-1366 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8199 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., hostname=363d8d38a970,33819,1732785660637, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:13,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:13,852 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785793851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,855 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785793854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:13,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785793858, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:13,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:13,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:13,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:13,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:13,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:13,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:13,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:14,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:14,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:14,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:14,058 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cd3a9300e682471b958edf9efc4f24c4 2024-11-28T09:22:14,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2105ced845bd4d15a1396c4d4bf44bab as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab 2024-11-28T09:22:14,069 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab, entries=150, sequenceid=376, filesize=12.0 K 2024-11-28T09:22:14,070 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e280f892d5084cd1a8d0b8b45e39b9e4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4 2024-11-28T09:22:14,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4, entries=150, sequenceid=376, filesize=12.0 K 2024-11-28T09:22:14,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/cd3a9300e682471b958edf9efc4f24c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4 2024-11-28T09:22:14,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4, entries=150, sequenceid=376, filesize=12.0 K 2024-11-28T09:22:14,084 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0ad9609950a045418498b830dd929869 in 556ms, sequenceid=376, compaction requested=true 2024-11-28T09:22:14,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:14,085 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:14,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:14,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:14,086 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:14,086 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:14,086 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:14,086 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:14,086 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,086 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:14,086 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8bde5bb1551d4f2cae7f3831a5903d2f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.8 K 2024-11-28T09:22:14,086 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7436708ef4ed4b5d945809b2da0df232, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.8 K 2024-11-28T09:22:14,087 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7436708ef4ed4b5d945809b2da0df232, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:14,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bde5bb1551d4f2cae7f3831a5903d2f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:14,087 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f5e7bf7aa8845d7b37200ce45870c69, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732785732244 2024-11-28T09:22:14,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 28e0da7b97a6402d9a9abeb94443342a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732785732244 2024-11-28T09:22:14,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e280f892d5084cd1a8d0b8b45e39b9e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:14,088 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2105ced845bd4d15a1396c4d4bf44bab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:14,099 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#327 average throughput is 3.28 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:14,099 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/ddb193e84dc64cbeba447aa63aa1761d is 50, key is test_row_0/A:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:14,106 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#328 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:14,107 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/b6478d64f1f04c45a2ff53a3d08bf3c7 is 50, key is test_row_0/B:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742206_1382 (size=13187) 2024-11-28T09:22:14,125 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/ddb193e84dc64cbeba447aa63aa1761d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ddb193e84dc64cbeba447aa63aa1761d 2024-11-28T09:22:14,130 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into ddb193e84dc64cbeba447aa63aa1761d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:14,130 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:14,130 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785734085; duration=0sec 2024-11-28T09:22:14,130 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:14,130 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:14,131 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:14,132 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:14,132 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:14,132 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,132 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/48143c8041da43508f439891189a9b60, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=36.8 K 2024-11-28T09:22:14,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48143c8041da43508f439891189a9b60, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1732785732120 2024-11-28T09:22:14,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1c3e2fa59b2c41398500398046e02170, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=351, earliestPutTs=1732785732244 2024-11-28T09:22:14,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cd3a9300e682471b958edf9efc4f24c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:14,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42869 is added to blk_1073742207_1383 (size=13187) 2024-11-28T09:22:14,143 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/b6478d64f1f04c45a2ff53a3d08bf3c7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b6478d64f1f04c45a2ff53a3d08bf3c7 2024-11-28T09:22:14,151 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into b6478d64f1f04c45a2ff53a3d08bf3c7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:14,151 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:14,151 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785734085; duration=0sec 2024-11-28T09:22:14,151 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:14,151 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:14,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:14,158 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:14,158 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,164 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#329 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:14,165 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/927472b689f94b53bf2783db4b20a3fb is 50, key is test_row_0/C:col10/1732785732881/Put/seqid=0 2024-11-28T09:22:14,167 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/553835f43d5742ef8ef8cdcb55405b05 is 50, key is test_row_0/A:col10/1732785734156/Put/seqid=0 2024-11-28T09:22:14,201 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,204 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,204 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:14,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742208_1384 (size=13187) 2024-11-28T09:22:14,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742209_1385 (size=12301) 2024-11-28T09:22:14,228 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/553835f43d5742ef8ef8cdcb55405b05 2024-11-28T09:22:14,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/03117742c21a42dbb2c1deeb699171ab is 50, key is test_row_0/B:col10/1732785734156/Put/seqid=0 2024-11-28T09:22:14,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785794252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,255 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785794252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785794252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742210_1386 (size=12301) 2024-11-28T09:22:14,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:14,356 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,360 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785794357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785794357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785794358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
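The repeated "Region is too busy due to exceeding memstore size limit" warnings above are the region server rejecting writes while the memstore of region 0ad9609950a045418498b830dd929869 is over its 512.0 K blocking limit; each rejection reaches the client as a RegionTooBusyException (see the paired ipc.CallRunner DEBUG entries). The following is a minimal client-side sketch, not taken from the test, of how such a write might be retried; the table, row, and column names are copied from the log purely for illustration, and in practice the HBase client usually retries this exception internally before surfacing it.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row/column names mirror the "test_row_0/A:col10" keys seen in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);               // may fail while the region is over its blocking limit
          break;                        // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                    // give up after a few attempts
          }
          Thread.sleep(100L * attempt); // back off and let the in-flight flush catch up
        }
      }
    }
  }
}
```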
2024-11-28T09:22:14,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785794562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785794562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785794562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,624 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/927472b689f94b53bf2783db4b20a3fb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/927472b689f94b53bf2783db4b20a3fb 2024-11-28T09:22:14,630 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into 927472b689f94b53bf2783db4b20a3fb(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:14,630 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:14,630 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785734085; duration=0sec 2024-11-28T09:22:14,630 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:14,630 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:14,663 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,663 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:14,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:14,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:14,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
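The "Remote procedure failed, pid=84" block above is the master's side of a retry loop: FlushRegionProcedure pid=84, a child of FlushTableProcedure pid=83 for table TestAcidGuarantees, keeps being re-dispatched to the region server, which answers "Unable to complete flush ... as already flushing" until the in-progress flush finishes. A table-level flush like this is typically requested through the Admin API; the sketch below is an assumption about how it might be triggered, not something visible in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table. With procedure-based
      // flush (as in this log) the master runs a FlushTableProcedure and one
      // FlushRegionProcedure per region, re-dispatching regions that report
      // they are already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```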
2024-11-28T09:22:14,681 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/03117742c21a42dbb2c1deeb699171ab 2024-11-28T09:22:14,688 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/b6f5f8c123f242bfb68a20fde5000b47 is 50, key is test_row_0/C:col10/1732785734156/Put/seqid=0 2024-11-28T09:22:14,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742211_1387 (size=12301) 2024-11-28T09:22:14,693 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=392 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/b6f5f8c123f242bfb68a20fde5000b47 2024-11-28T09:22:14,697 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/553835f43d5742ef8ef8cdcb55405b05 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05 2024-11-28T09:22:14,702 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05, entries=150, sequenceid=392, filesize=12.0 K 2024-11-28T09:22:14,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/03117742c21a42dbb2c1deeb699171ab as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab 2024-11-28T09:22:14,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab, entries=150, sequenceid=392, filesize=12.0 K 2024-11-28T09:22:14,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/b6f5f8c123f242bfb68a20fde5000b47 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47 2024-11-28T09:22:14,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47, entries=150, sequenceid=392, filesize=12.0 K 2024-11-28T09:22:14,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 0ad9609950a045418498b830dd929869 in 554ms, sequenceid=392, compaction requested=false 2024-11-28T09:22:14,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:14,815 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:14,816 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:14,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:14,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 is 50, key is test_row_0/A:col10/1732785734250/Put/seqid=0 2024-11-28T09:22:14,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742212_1388 (size=12301) 2024-11-28T09:22:14,835 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 2024-11-28T09:22:14,844 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/875876e8e5e942c2be1cf1bca4e23edd is 50, key is test_row_0/B:col10/1732785734250/Put/seqid=0 2024-11-28T09:22:14,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:14,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742213_1389 (size=12301) 2024-11-28T09:22:14,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:14,867 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:14,879 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785794877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785794878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,880 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785794879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785794981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,982 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785794981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:14,983 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:14,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785794981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785795192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,194 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785795194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785795194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,250 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/875876e8e5e942c2be1cf1bca4e23edd 2024-11-28T09:22:15,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/008a8e2f1e8242bebf53d7f8800b2988 is 50, key is test_row_0/C:col10/1732785734250/Put/seqid=0 2024-11-28T09:22:15,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742214_1390 (size=12301) 2024-11-28T09:22:15,274 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=416 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/008a8e2f1e8242bebf53d7f8800b2988 2024-11-28T09:22:15,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 2024-11-28T09:22:15,286 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95, entries=150, sequenceid=416, filesize=12.0 K 2024-11-28T09:22:15,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/875876e8e5e942c2be1cf1bca4e23edd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd 2024-11-28T09:22:15,291 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd, entries=150, sequenceid=416, filesize=12.0 K 2024-11-28T09:22:15,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/008a8e2f1e8242bebf53d7f8800b2988 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988 2024-11-28T09:22:15,296 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988, entries=150, sequenceid=416, filesize=12.0 K 2024-11-28T09:22:15,297 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0ad9609950a045418498b830dd929869 in 480ms, sequenceid=416, compaction requested=true 2024-11-28T09:22:15,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 
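For context on the "Over memstore limit=512.0 K" figure that recurs throughout this stretch: in HBase the per-region blocking limit checked by HRegion.checkResources is the memstore flush size multiplied by the block multiplier, so a 512 K limit would be consistent with, for example, a 128 KB flush size and the default multiplier of 4. The exact values this test configures are not visible in the log; the sketch below only illustrates the relationship between the two settings and uses assumed numbers.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SmallMemstoreConfigExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values for illustration: flush at 128 KB, block writes at 4x that (512 KB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // Prints 524288, i.e. the 512.0 K limit reported in the RegionTooBusyException.
    System.out.println("Blocking limit (bytes): " + blockingLimit);
  }
}
```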
2024-11-28T09:22:15,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:15,297 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-28T09:22:15,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-28T09:22:15,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-28T09:22:15,299 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5580 sec 2024-11-28T09:22:15,301 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.5620 sec 2024-11-28T09:22:15,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:15,498 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:22:15,498 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:15,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:15,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:15,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:15,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:15,499 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:15,507 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cb5c373087545fc8558231813fcb1bd is 50, key is test_row_0/A:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:15,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742215_1391 (size=12301) 2024-11-28T09:22:15,512 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cb5c373087545fc8558231813fcb1bd 2024-11-28T09:22:15,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/06c77337be05470693be97b9d9f20171 is 50, key is test_row_0/B:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:15,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742216_1392 (size=12301) 2024-11-28T09:22:15,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785795525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,528 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785795526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785795527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785795629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785795629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785795630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785795832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785795832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,834 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:15,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785795832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:15,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-28T09:22:15,845 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-28T09:22:15,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:15,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-28T09:22:15,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T09:22:15,848 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:15,849 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:15,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:15,922 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/06c77337be05470693be97b9d9f20171 2024-11-28T09:22:15,929 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/60213f43ef484492b81252d072ffd5c8 is 50, key is test_row_0/C:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:15,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742217_1393 (size=12301) 2024-11-28T09:22:15,934 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=431 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/60213f43ef484492b81252d072ffd5c8 2024-11-28T09:22:15,939 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/1cb5c373087545fc8558231813fcb1bd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd 2024-11-28T09:22:15,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T09:22:15,951 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd, entries=150, sequenceid=431, filesize=12.0 K 2024-11-28T09:22:15,952 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/06c77337be05470693be97b9d9f20171 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171 2024-11-28T09:22:15,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171, entries=150, sequenceid=431, filesize=12.0 K 2024-11-28T09:22:15,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/60213f43ef484492b81252d072ffd5c8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8 2024-11-28T09:22:15,961 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8, entries=150, sequenceid=431, filesize=12.0 K 2024-11-28T09:22:15,962 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 
0ad9609950a045418498b830dd929869 in 464ms, sequenceid=431, compaction requested=true 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:15,962 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:15,962 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:15,962 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:15,963 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:15,963 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:15,963 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ddb193e84dc64cbeba447aa63aa1761d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=48.9 K 2024-11-28T09:22:15,963 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b6478d64f1f04c45a2ff53a3d08bf3c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=48.9 K 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b6478d64f1f04c45a2ff53a3d08bf3c7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:15,963 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ddb193e84dc64cbeba447aa63aa1761d, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 03117742c21a42dbb2c1deeb699171ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732785733540 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 553835f43d5742ef8ef8cdcb55405b05, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732785733540 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 875876e8e5e942c2be1cf1bca4e23edd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732785734236 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
1cc3dca00d2d4fd9a6e3f8eb95a1eb95, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732785734236 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 06c77337be05470693be97b9d9f20171, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:15,964 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1cb5c373087545fc8558231813fcb1bd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:15,973 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#339 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:15,974 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5487a2e5200c4d1cbdd3ce984eec2f30 is 50, key is test_row_0/B:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:15,977 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#340 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:15,977 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e414b2e3af3845f2b32092d3156157db is 50, key is test_row_0/A:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:15,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742219_1395 (size=13323) 2024-11-28T09:22:15,997 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e414b2e3af3845f2b32092d3156157db as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e414b2e3af3845f2b32092d3156157db 2024-11-28T09:22:16,001 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-28T09:22:16,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:16,002 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:16,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,004 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into e414b2e3af3845f2b32092d3156157db(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:16,004 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:16,005 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=12, startTime=1732785735962; duration=0sec 2024-11-28T09:22:16,005 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:16,005 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:16,005 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:16,006 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50090 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:16,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:16,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:16,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/927472b689f94b53bf2783db4b20a3fb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=48.9 K 2024-11-28T09:22:16,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 927472b689f94b53bf2783db4b20a3fb, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=376, earliestPutTs=1732785732881 2024-11-28T09:22:16,008 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b6f5f8c123f242bfb68a20fde5000b47, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=392, earliestPutTs=1732785733540 2024-11-28T09:22:16,008 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 008a8e2f1e8242bebf53d7f8800b2988, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=416, earliestPutTs=1732785734236 2024-11-28T09:22:16,009 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60213f43ef484492b81252d072ffd5c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:16,022 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/746661b693174a64876fb98985dfd950 is 50, key is test_row_0/A:col10/1732785735525/Put/seqid=0 2024-11-28T09:22:16,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742218_1394 (size=13323) 2024-11-28T09:22:16,029 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/5487a2e5200c4d1cbdd3ce984eec2f30 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5487a2e5200c4d1cbdd3ce984eec2f30 2024-11-28T09:22:16,034 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into 5487a2e5200c4d1cbdd3ce984eec2f30(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:16,034 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:16,034 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=12, startTime=1732785735962; duration=0sec 2024-11-28T09:22:16,035 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:16,035 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:16,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742220_1396 (size=12301) 2024-11-28T09:22:16,038 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#342 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:16,039 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/ce1fc47482b04aee9d1257c4ac0b74b7 is 50, key is test_row_0/C:col10/1732785734869/Put/seqid=0 2024-11-28T09:22:16,048 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/746661b693174a64876fb98985dfd950 2024-11-28T09:22:16,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742221_1397 (size=13323) 2024-11-28T09:22:16,057 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e6eb3e89db1d47df8fd94e46d7bef978 is 50, key is test_row_0/B:col10/1732785735525/Put/seqid=0 2024-11-28T09:22:16,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742222_1398 (size=12301) 2024-11-28T09:22:16,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:16,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:16,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785796146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T09:22:16,152 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785796149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785796150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785796250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785796254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785796254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T09:22:16,454 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785796453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,457 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/ce1fc47482b04aee9d1257c4ac0b74b7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ce1fc47482b04aee9d1257c4ac0b74b7 2024-11-28T09:22:16,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785796457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785796459, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,462 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e6eb3e89db1d47df8fd94e46d7bef978 2024-11-28T09:22:16,463 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into ce1fc47482b04aee9d1257c4ac0b74b7(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:16,463 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:16,463 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=12, startTime=1732785735962; duration=0sec 2024-11-28T09:22:16,463 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:16,463 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:16,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/04a3f1638a5e4f35976a44deac1e4f4a is 50, key is test_row_0/C:col10/1732785735525/Put/seqid=0 2024-11-28T09:22:16,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742223_1399 (size=12301) 2024-11-28T09:22:16,478 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=452 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/04a3f1638a5e4f35976a44deac1e4f4a 2024-11-28T09:22:16,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/746661b693174a64876fb98985dfd950 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950 2024-11-28T09:22:16,489 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T09:22:16,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/e6eb3e89db1d47df8fd94e46d7bef978 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978 2024-11-28T09:22:16,499 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T09:22:16,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/04a3f1638a5e4f35976a44deac1e4f4a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a 2024-11-28T09:22:16,503 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a, entries=150, sequenceid=452, filesize=12.0 K 2024-11-28T09:22:16,504 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 0ad9609950a045418498b830dd929869 in 503ms, sequenceid=452, compaction requested=false 2024-11-28T09:22:16,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:16,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:16,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-28T09:22:16,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-28T09:22:16,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-28T09:22:16,507 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 656 msec 2024-11-28T09:22:16,508 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 660 msec 2024-11-28T09:22:16,761 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-28T09:22:16,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:16,762 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:16,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:16,771 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/4e45d21289dc4263aa23838c4b697a15 is 50, key is test_row_0/A:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:16,804 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785796799, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785796802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,809 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785796805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742224_1400 (size=14741) 2024-11-28T09:22:16,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785796906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785796908, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:16,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785796910, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:16,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-28T09:22:16,952 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-28T09:22:16,953 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:16,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-28T09:22:16,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:16,955 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:16,955 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:16,955 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:17,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:17,107 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T09:22:17,107 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:17,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,108 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:17,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:17,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:17,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785797110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,111 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785797111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785797113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,212 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/4e45d21289dc4263aa23838c4b697a15 2024-11-28T09:22:17,234 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ce5e97e1e9e64d99b47b0aa0663cbc6a is 50, key is test_row_0/B:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:17,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742225_1401 (size=12301) 2024-11-28T09:22:17,256 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ce5e97e1e9e64d99b47b0aa0663cbc6a 2024-11-28T09:22:17,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:17,260 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,261 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T09:22:17,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:17,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:17,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] handler.RSProcedureHandler(58): pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:17,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=88 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:17,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=88 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:17,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/79e1c946d04642f2befe4a4500692295 is 50, key is test_row_0/C:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:17,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742226_1402 (size=12301) 2024-11-28T09:22:17,299 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=472 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/79e1c946d04642f2befe4a4500692295 2024-11-28T09:22:17,306 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/4e45d21289dc4263aa23838c4b697a15 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15 2024-11-28T09:22:17,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15, entries=200, sequenceid=472, filesize=14.4 K 2024-11-28T09:22:17,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/ce5e97e1e9e64d99b47b0aa0663cbc6a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a 2024-11-28T09:22:17,318 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a, entries=150, sequenceid=472, filesize=12.0 K 2024-11-28T09:22:17,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/79e1c946d04642f2befe4a4500692295 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295 2024-11-28T09:22:17,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295, entries=150, sequenceid=472, filesize=12.0 K 2024-11-28T09:22:17,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=107.34 KB/109920 for 0ad9609950a045418498b830dd929869 in 563ms, sequenceid=472, 
compaction requested=true 2024-11-28T09:22:17,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:17,324 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:17,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:17,325 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:17,325 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:17,326 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40365 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:17,326 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:17,326 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,326 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e414b2e3af3845f2b32092d3156157db, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=39.4 K 2024-11-28T09:22:17,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:17,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:17,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:17,327 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e414b2e3af3845f2b32092d3156157db, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:17,327 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: 
system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:17,327 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:17,327 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:17,327 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,327 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5487a2e5200c4d1cbdd3ce984eec2f30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.0 K 2024-11-28T09:22:17,327 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 746661b693174a64876fb98985dfd950, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732785735524 2024-11-28T09:22:17,328 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5487a2e5200c4d1cbdd3ce984eec2f30, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:17,328 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e45d21289dc4263aa23838c4b697a15, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736138 2024-11-28T09:22:17,328 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e6eb3e89db1d47df8fd94e46d7bef978, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732785735524 2024-11-28T09:22:17,329 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ce5e97e1e9e64d99b47b0aa0663cbc6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736145 2024-11-28T09:22:17,343 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#348 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:17,344 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/beafa48671ec4afb97a85e2f71e79929 is 50, key is test_row_0/A:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:17,353 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#349 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:17,354 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/febb343dc0e44f97b5239c130213a473 is 50, key is test_row_0/B:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:17,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742227_1403 (size=13425) 2024-11-28T09:22:17,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742228_1404 (size=13425) 2024-11-28T09:22:17,367 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/beafa48671ec4afb97a85e2f71e79929 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/beafa48671ec4afb97a85e2f71e79929 2024-11-28T09:22:17,371 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into beafa48671ec4afb97a85e2f71e79929(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:17,371 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:17,371 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785737324; duration=0sec 2024-11-28T09:22:17,372 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:17,372 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:17,372 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:17,372 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37925 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:17,373 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:17,373 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:17,373 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ce1fc47482b04aee9d1257c4ac0b74b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.0 K 2024-11-28T09:22:17,373 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce1fc47482b04aee9d1257c4ac0b74b7, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=431, earliestPutTs=1732785734869 2024-11-28T09:22:17,374 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04a3f1638a5e4f35976a44deac1e4f4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=452, earliestPutTs=1732785735524 2024-11-28T09:22:17,374 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79e1c946d04642f2befe4a4500692295, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736145 2024-11-28T09:22:17,381 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#350 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:17,382 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/f65b94d7c2974570bd04ad1f00fc6e97 is 50, key is test_row_0/C:col10/1732785736148/Put/seqid=0 2024-11-28T09:22:17,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742229_1405 (size=13425) 2024-11-28T09:22:17,390 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/f65b94d7c2974570bd04ad1f00fc6e97 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/f65b94d7c2974570bd04ad1f00fc6e97 2024-11-28T09:22:17,394 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into f65b94d7c2974570bd04ad1f00fc6e97(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:17,394 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:17,394 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785737327; duration=0sec 2024-11-28T09:22:17,395 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:17,395 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:17,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-28T09:22:17,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:17,414 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-28T09:22:17,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:17,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:17,414 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:17,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:17,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:17,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:17,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:17,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:17,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/c0d466f910f4467783cd60b8a25b9b51 is 50, key is test_row_0/A:col10/1732785736801/Put/seqid=0 2024-11-28T09:22:17,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742230_1406 (size=12301) 2024-11-28T09:22:17,435 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 257 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785797433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785797436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,438 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785797436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,539 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 259 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785797537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785797539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,540 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785797539, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:17,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785797741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785797741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,748 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:17,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785797742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:17,770 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/febb343dc0e44f97b5239c130213a473 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/febb343dc0e44f97b5239c130213a473 2024-11-28T09:22:17,779 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into febb343dc0e44f97b5239c130213a473(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
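The "Committing .../.tmp/... as .../<family>/..." entries here and throughout this log show the write-to-temp-then-rename pattern: the new HFile is fully written under the region's .tmp directory and only then moved into the column-family directory, so readers never observe a half-written file. A minimal sketch of that pattern against the plain Hadoop FileSystem API follows; the paths and payload are placeholders, not the real HBase store-layout code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Write-to-temp-then-rename sketch; placeholder paths, not the real HBase region layout. */
public class TmpCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/example/region/.tmp/C/newfile");
        Path finalFile = new Path("/example/region/C/newfile");

        // 1. Write the whole file under .tmp first.
        try (FSDataOutputStream out = fs.create(tmpFile)) {
            out.writeBytes("hfile bytes would go here");
        }

        // 2. Move it into the store directory in one rename; readers only ever see a complete file.
        if (!fs.rename(tmpFile, finalFile)) {
            throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
        }
    }
}

Within a single HDFS filesystem the rename is a metadata-only operation, which is what makes committing the compacted or flushed file cheap compared to rewriting it.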
2024-11-28T09:22:17,779 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:17,779 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785737325; duration=0sec 2024-11-28T09:22:17,779 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:17,779 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:17,828 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/c0d466f910f4467783cd60b8a25b9b51 2024-11-28T09:22:17,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/46383e659f7e407f9f1d514f5edceb61 is 50, key is test_row_0/B:col10/1732785736801/Put/seqid=0 2024-11-28T09:22:17,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742231_1407 (size=12301) 2024-11-28T09:22:18,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785798046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,058 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785798054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:18,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785798054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,293 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/46383e659f7e407f9f1d514f5edceb61 2024-11-28T09:22:18,347 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/8c23ed2b64f5422d9e64045835fda331 is 50, key is test_row_0/C:col10/1732785736801/Put/seqid=0 2024-11-28T09:22:18,412 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742232_1408 (size=12301) 2024-11-28T09:22:18,566 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785798562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785798562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,583 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:18,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785798580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:18,814 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=494 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/8c23ed2b64f5422d9e64045835fda331 2024-11-28T09:22:18,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/c0d466f910f4467783cd60b8a25b9b51 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51 2024-11-28T09:22:18,826 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51, entries=150, sequenceid=494, filesize=12.0 K 2024-11-28T09:22:18,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/46383e659f7e407f9f1d514f5edceb61 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61 2024-11-28T09:22:18,833 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61, entries=150, sequenceid=494, filesize=12.0 K 2024-11-28T09:22:18,834 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/8c23ed2b64f5422d9e64045835fda331 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331 2024-11-28T09:22:18,840 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331, entries=150, sequenceid=494, filesize=12.0 K 2024-11-28T09:22:18,842 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0ad9609950a045418498b830dd929869 in 1428ms, sequenceid=494, compaction requested=false 2024-11-28T09:22:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
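The many RegionTooBusyException warnings in this stretch are HRegion.checkResources rejecting new writes while the region's memstore sits above its blocking limit (reported as 512.0 K in this mini-cluster test); they stop once the flush above drains the memstore. The standard HBase client already retries such calls internally with its own pause settings, but a rough stand-alone sketch of the backoff behaviour, with made-up row/column values and a hand-rolled retry loop, might look like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Retry-with-backoff sketch for writes rejected with RegionTooBusyException. */
public class BusyRegionRetrySketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);
                    break;                  // write accepted
                } catch (RegionTooBusyException e) {
                    // Memstore above its blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;         // simple exponential backoff
                }
            }
        }
    }
}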
2024-11-28T09:22:18,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-28T09:22:18,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-28T09:22:18,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-28T09:22:18,855 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8990 sec 2024-11-28T09:22:18,863 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 1.9040 sec 2024-11-28T09:22:19,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-28T09:22:19,064 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-28T09:22:19,065 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:19,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees 2024-11-28T09:22:19,067 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:19,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:19,067 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=89, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:19,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:19,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:19,220 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,223 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=90 2024-11-28T09:22:19,223 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
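The pid=87/89 entries above trace a full FlushTableProcedure round trip: the master stores the procedure, fans out a FlushRegionProcedure subprocedure to the region server, and answers the client's repeated "Checking to see if procedure is done" polls until both finish. From the client's point of view this whole exchange is a single blocking Admin.flush call; a minimal sketch (connection details assumed, table name taken from this test) is:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

/** Client-side view of the flush that the master logs as FlushTableProcedure pid=87/89. */
public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Blocks until the master reports the flush procedure as done
            // (the repeated "Checking to see if procedure is done" lines above).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}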
2024-11-28T09:22:19,224 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:19,224 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:19,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e8b9441590cf40bdbcb528598ab048c9 is 50, key is test_row_0/A:col10/1732785737431/Put/seqid=0 2024-11-28T09:22:19,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742233_1409 (size=12301) 2024-11-28T09:22:19,291 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e8b9441590cf40bdbcb528598ab048c9 2024-11-28T09:22:19,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/266a0357d3a94f3ea721e47305afd6c3 is 50, key is test_row_0/B:col10/1732785737431/Put/seqid=0 2024-11-28T09:22:19,368 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742234_1410 (size=12301) 2024-11-28T09:22:19,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:19,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
as already flushing 2024-11-28T09:22:19,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:19,622 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785799619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,626 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785799620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,627 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 262 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785799622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:19,726 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785799724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785799727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,730 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785799728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,770 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/266a0357d3a94f3ea721e47305afd6c3 2024-11-28T09:22:19,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a5b7d922a6384b088c3a25609d55cc3a is 50, key is test_row_0/C:col10/1732785737431/Put/seqid=0 2024-11-28T09:22:19,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742235_1411 (size=12301) 2024-11-28T09:22:19,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785799928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785799931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:19,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:19,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785799932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:20,237 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785800233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,237 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=511 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a5b7d922a6384b088c3a25609d55cc3a 2024-11-28T09:22:20,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785800237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/e8b9441590cf40bdbcb528598ab048c9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9 2024-11-28T09:22:20,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785800239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,248 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9, entries=150, sequenceid=511, filesize=12.0 K 2024-11-28T09:22:20,249 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/266a0357d3a94f3ea721e47305afd6c3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3 2024-11-28T09:22:20,257 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3, entries=150, sequenceid=511, filesize=12.0 K 2024-11-28T09:22:20,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a5b7d922a6384b088c3a25609d55cc3a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a 2024-11-28T09:22:20,267 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a, entries=150, sequenceid=511, filesize=12.0 K 2024-11-28T09:22:20,268 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 KB/116790 for 0ad9609950a045418498b830dd929869 in 1044ms, sequenceid=511, 
compaction requested=true 2024-11-28T09:22:20,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:20,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:20,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-11-28T09:22:20,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-11-28T09:22:20,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-28T09:22:20,273 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2050 sec 2024-11-28T09:22:20,275 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=89, table=TestAcidGuarantees in 1.2090 sec 2024-11-28T09:22:20,745 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:22:20,745 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:20,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:20,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:20,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:20,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:20,746 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:20,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:20,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a0845ed2c60f43b8a706f13a93cfd82d is 50, key is test_row_0/A:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:20,791 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785800784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785800788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,793 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785800789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742236_1412 (size=12301) 2024-11-28T09:22:20,822 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a0845ed2c60f43b8a706f13a93cfd82d 2024-11-28T09:22:20,832 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/fc2bd06adfa04152acd25c947b455a93 is 50, key is test_row_0/B:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:20,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742237_1413 (size=12301) 2024-11-28T09:22:20,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785800893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785800894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:20,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:20,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785800894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,105 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785801101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785801100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,107 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785801101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-28T09:22:21,173 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 89 completed 2024-11-28T09:22:21,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:21,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees 2024-11-28T09:22:21,177 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:21,177 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=91, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:21,178 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:21,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:21,279 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/fc2bd06adfa04152acd25c947b455a93 2024-11-28T09:22:21,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:21,299 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4f76377fefe649c595732d99a1d245ca is 50, key is test_row_0/C:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:21,339 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742238_1414 (size=12301) 2024-11-28T09:22:21,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=532 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4f76377fefe649c595732d99a1d245ca 2024-11-28T09:22:21,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/a0845ed2c60f43b8a706f13a93cfd82d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d 2024-11-28T09:22:21,348 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,348 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:21,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:21,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,349 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
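The burst of RegionTooBusyException entries above is the region server refusing new Mutate calls while the memstore of region 0ad9609950a045418498b830dd929869 is over its blocking limit (the flush size times hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K limit suggests the test deliberately shrinks the flush size). The HBase client retries these rejections on its own, but as a hedged illustration of what a writer hitting this region would look like, here is a minimal Java sketch with an explicit backoff loop; the table, row, family and qualifier are taken from the log, while the value, retry count and sleep times are made up for the example.

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row, family and qualifier mirror the log ("test_row_0/A:col10"); the value is illustrative.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          // Rejected server-side with RegionTooBusyException while the memstore is over its limit.
          table.put(put);
          return;
        } catch (IOException e) {
          // RegionTooBusyException extends IOException; once the client's own retries
          // give up, back off and try again rather than failing the writer outright.
          if (attempt == 5) {
            throw e;
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}
```

Whether such an outer loop is needed at all depends on hbase.client.retries.number and hbase.client.pause; with generous settings the single table.put() call already rides out the busy window on its own.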
2024-11-28T09:22:21,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,364 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d, entries=150, sequenceid=532, filesize=12.0 K 2024-11-28T09:22:21,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/fc2bd06adfa04152acd25c947b455a93 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93 2024-11-28T09:22:21,369 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93, entries=150, sequenceid=532, filesize=12.0 K 2024-11-28T09:22:21,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/4f76377fefe649c595732d99a1d245ca as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca 2024-11-28T09:22:21,375 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca, entries=150, sequenceid=532, filesize=12.0 K 2024-11-28T09:22:21,377 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 0ad9609950a045418498b830dd929869 in 632ms, sequenceid=532, compaction requested=true 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:21,377 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:22:21,377 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:21,377 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:21,380 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:21,380 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:21,380 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor compaction (all files) 2024-11-28T09:22:21,380 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:21,380 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,380 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
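Every flush above leaves one more HFile per store, and once enough files have accumulated the SortedCompactionPolicy/ExploringCompactionPolicy entries show all four being picked for a minor compaction of stores A and C (the "16 blocking" figure matches the default hbase.hstore.blockingStoreFiles). For completeness, a hedged sketch of forcing and then waiting out a compaction from a client with the standard Admin API follows; the timeout and polling interval are arbitrary, and nothing in the log indicates the test does this itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask for a compaction of every store in the table.
      admin.majorCompact(table);
      // Poll the reported compaction state; note it can still read NONE briefly
      // before the request is queued on the region server.
      long deadline = System.currentTimeMillis() + 60_000L; // arbitrary 60 s timeout
      while (admin.getCompactionState(table) != CompactionState.NONE
          && System.currentTimeMillis() < deadline) {
        Thread.sleep(500L);
      }
    }
  }
}
```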
2024-11-28T09:22:21,380 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/beafa48671ec4afb97a85e2f71e79929, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=49.1 K 2024-11-28T09:22:21,380 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/f65b94d7c2974570bd04ad1f00fc6e97, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=49.1 K 2024-11-28T09:22:21,381 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting beafa48671ec4afb97a85e2f71e79929, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736145 2024-11-28T09:22:21,381 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f65b94d7c2974570bd04ad1f00fc6e97, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736145 2024-11-28T09:22:21,381 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0d466f910f4467783cd60b8a25b9b51, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732785736785 2024-11-28T09:22:21,382 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8c23ed2b64f5422d9e64045835fda331, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732785736785 2024-11-28T09:22:21,382 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e8b9441590cf40bdbcb528598ab048c9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1732785737431 2024-11-28T09:22:21,382 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
a5b7d922a6384b088c3a25609d55cc3a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1732785737431 2024-11-28T09:22:21,382 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a0845ed2c60f43b8a706f13a93cfd82d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:21,383 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f76377fefe649c595732d99a1d245ca, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:21,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:21,413 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:21,415 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:21,418 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#360 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:21,418 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/d98cf9fda27a487687e0e75bbb3c53fa is 50, key is test_row_0/A:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:21,423 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/00cc467266824e62ba7d56607af4e843 is 50, key is test_row_0/A:col10/1732785741412/Put/seqid=0 2024-11-28T09:22:21,438 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#362 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:21,439 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/32982aa720de44fca1d1029d23b46d91 is 50, key is test_row_0/C:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:21,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742240_1416 (size=12301) 2024-11-28T09:22:21,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:21,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742239_1415 (size=13561) 2024-11-28T09:22:21,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742241_1417 (size=13561) 2024-11-28T09:22:21,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
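[annotation] The PressureAwareThroughputController entries above ("average throughput is 3.28 MB/second ... total limit is 50.00 MB/second") show that compaction writes are metered and the writer sleeps whenever it gets ahead of the configured limit. As a rough illustration only of that pacing idea (this is not HBase's implementation; the class and method names below are invented for the sketch):

    /** Illustrative only: a simplified sleep-based throughput throttle, loosely
     *  modeled on the behavior described by the throughput-controller log lines
     *  above. All names here are hypothetical, not HBase internals. */
    public final class SimpleThroughputThrottle {
      private final double maxBytesPerSecond;
      private final long startNanos = System.nanoTime();
      private long bytesWritten = 0;

      public SimpleThroughputThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
      }

      /** Call after writing `bytes`; sleeps if the average rate exceeds the limit. */
      public synchronized void control(long bytes) throws InterruptedException {
        bytesWritten += bytes;
        double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
        double minSecondsNeeded = bytesWritten / maxBytesPerSecond;
        if (minSecondsNeeded > elapsedSec) {
          // Writing faster than the limit: sleep off the difference.
          Thread.sleep((long) ((minSecondsNeeded - elapsedSec) * 1000));
        }
      }

      public static void main(String[] args) throws InterruptedException {
        SimpleThroughputThrottle throttle = new SimpleThroughputThrottle(50 * 1024 * 1024); // 50 MB/s
        byte[] chunk = new byte[1024 * 1024];
        for (int i = 0; i < 10; i++) {
          // Pretend to write a 1 MB chunk, then let the throttle pace us.
          throttle.control(chunk.length);
        }
        System.out.println("done");
      }
    }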
2024-11-28T09:22:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,510 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/d98cf9fda27a487687e0e75bbb3c53fa as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d98cf9fda27a487687e0e75bbb3c53fa 2024-11-28T09:22:21,510 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/32982aa720de44fca1d1029d23b46d91 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/32982aa720de44fca1d1029d23b46d91 2024-11-28T09:22:21,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,518 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into d98cf9fda27a487687e0e75bbb3c53fa(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:21,518 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:21,518 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=12, startTime=1732785741377; duration=0sec 2024-11-28T09:22:21,519 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:21,519 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:21,519 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:21,520 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into 32982aa720de44fca1d1029d23b46d91(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
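[annotation] The "Completed compaction of 4 (all) file(s) ... into d98cf9fda27a487687e0e75bbb3c53fa(size=13.2 K)" entries show four ~12-13 K inputs collapsing into a single 13.2 K output: the test keeps rewriting the same small set of rows, so superseded versions are dropped during the rewrite. The compactions here are selected automatically by the store's policy, but a compaction can also be requested explicitly through the Admin API. A minimal sketch, with the table and family names taken from the log and the connection assumed to come from an hbase-site.xml on the classpath:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("TestAcidGuarantees");
          // Ask for a compaction of one store, then a major compaction of the whole table.
          admin.compact(table, Bytes.toBytes("A"));
          admin.majorCompact(table);
          // Both calls are asynchronous: they queue the request on the regionserver,
          // which is why the log reports queue time and execution time separately.
        }
      }
    }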
2024-11-28T09:22:21,520 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:21,520 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=12, startTime=1732785741377; duration=0sec 2024-11-28T09:22:21,520 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:21,520 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:21,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785801506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,523 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 50328 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:21,523 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:21,523 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
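[annotation] The ExploringCompactionPolicy entry above ("selected 4 files of size 50328 ... after considering 3 permutations with 3 in ratio") refers to the policy's ratio test: a candidate set is "in ratio" when no single file is larger than hbase.hstore.compaction.ratio (1.2 by default) times the combined size of the other files in the set. A simplified sketch of that check, written from this description rather than copied from HBase source, with file sizes chosen to roughly match the 50328-byte selection above:

    import java.util.List;

    public final class RatioCheck {
      /**
       * Simplified "in ratio" test used when exploring candidate compaction sets:
       * every file must be no larger than `ratio` times the sum of the other files.
       * Illustrative only.
       */
      static boolean filesInRatio(List<Long> sizes, double ratio) {
        if (sizes.size() < 2) {
          return true;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
          if (size > (total - size) * ratio) {
            return false;
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Roughly one ~13.1 K file plus three ~12.0 K files, as in the selection above.
        List<Long> selected = List.of(13_425L, 12_301L, 12_301L, 12_301L);
        System.out.println(filesInRatio(selected, 1.2)); // true: no file dominates the set
      }
    }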
2024-11-28T09:22:21,523 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/febb343dc0e44f97b5239c130213a473, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=49.1 K 2024-11-28T09:22:21,524 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting febb343dc0e44f97b5239c130213a473, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=472, earliestPutTs=1732785736145 2024-11-28T09:22:21,525 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46383e659f7e407f9f1d514f5edceb61, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=494, earliestPutTs=1732785736785 2024-11-28T09:22:21,526 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 266a0357d3a94f3ea721e47305afd6c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=511, earliestPutTs=1732785737431 2024-11-28T09:22:21,526 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc2bd06adfa04152acd25c947b455a93, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:21,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785801520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 264 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785801521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,537 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#363 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:21,538 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d459baf8379744e3b6ce9cb210b8e37a is 50, key is test_row_0/B:col10/1732785740741/Put/seqid=0 2024-11-28T09:22:21,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742242_1418 (size=13561) 2024-11-28T09:22:21,575 DEBUG [Thread-1373 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x560ec309 to 127.0.0.1:53251 2024-11-28T09:22:21,575 DEBUG [Thread-1373 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:21,577 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/d459baf8379744e3b6ce9cb210b8e37a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d459baf8379744e3b6ce9cb210b8e37a 2024-11-28T09:22:21,582 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into d459baf8379744e3b6ce9cb210b8e37a(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:21,582 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:21,582 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=12, startTime=1732785741377; duration=0sec 2024-11-28T09:22:21,582 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:21,582 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:21,583 DEBUG [Thread-1375 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5886c0f2 to 127.0.0.1:53251 2024-11-28T09:22:21,583 DEBUG [Thread-1377 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x66e575aa to 127.0.0.1:53251 2024-11-28T09:22:21,583 DEBUG [Thread-1379 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x131ceb8f to 127.0.0.1:53251 2024-11-28T09:22:21,583 DEBUG [Thread-1375 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:21,583 DEBUG [Thread-1377 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:21,583 DEBUG [Thread-1379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:21,585 DEBUG [Thread-1381 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5a78bf6d to 127.0.0.1:53251 2024-11-28T09:22:21,585 DEBUG [Thread-1381 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
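[annotation] The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources: once a region's memstore grows past hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, new mutations are rejected until a flush catches up. The 512 K figure implies this test runs with a deliberately tiny flush size (for example 128 K x 4; that exact setting is an assumption, the shipped defaults are 128 MB x 4). A sketch of the arithmetic:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Defaults are 128 MB and 4; the "Over memstore limit=512.0 K" warnings above
        // imply a much smaller flush size was configured for the test (assumed 128 K).
        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = flushSize * multiplier;
        System.out.println("updates are blocked once a region's memstore exceeds "
            + blockingLimit + " bytes");
        // e.g. 128 KB * 4 = 512 KB, matching the limit reported in the warnings above.
      }
    }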
2024-11-28T09:22:21,624 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785801623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,637 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785801637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 266 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785801643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:21,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
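[annotation] On the client side, RegionTooBusyException is treated as retryable: the stock HBase client already backs off and retries (governed by settings such as hbase.client.retries.number and hbase.client.pause, and surfacing a retries-exhausted error only if it gives up), so the writer threads in this test eventually succeed once the flush frees memstore space. Purely as an illustration of that behavior, a hand-rolled retry loop against the table, row, family and qualifier seen in the log might look like the sketch below; real code would normally rely on the client's built-in retries instead:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long pauseMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // success
            } catch (RegionTooBusyException e) {
              // Region's memstore is over its blocking limit; wait for the flush to catch up.
              Thread.sleep(pauseMs);
              pauseMs *= 2; // simple exponential backoff; give up silently after the last attempt
            }
          }
        }
      }
    }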
2024-11-28T09:22:21,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:21,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:21,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:21,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,818 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:21,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
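[annotation] The pid=91/pid=92 traffic above is the master-side flush procedure, presumably triggered by the test going through the Admin API while its writer threads are still running: the master dispatches a FlushRegionCallable to the regionserver, the regionserver answers "NOT flushing ... as already flushing" with an IOException, the master logs "Remote procedure failed" and re-dispatches until a flush can actually be taken. Requesting such a flush from client code is a one-liner; a minimal sketch (connection details assumed as before):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestFlush {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; if a region is already
          // flushing (as in the log above), the procedure keeps retrying on the server side.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }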
2024-11-28T09:22:21,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:21,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:21,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 303 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45252 deadline: 1732785801825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45346 deadline: 1732785801839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:21,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 268 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:45320 deadline: 1732785801846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,887 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/00cc467266824e62ba7d56607af4e843 2024-11-28T09:22:21,918 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/856c45bd2b074dd19c5bf0958b3ff074 is 50, key is test_row_0/B:col10/1732785741412/Put/seqid=0 2024-11-28T09:22:21,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742243_1419 (size=12301) 2024-11-28T09:22:21,948 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/856c45bd2b074dd19c5bf0958b3ff074 2024-11-28T09:22:21,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/142c41e4920d4bcc9ff35d363279f2a2 is 50, key is test_row_0/C:col10/1732785741412/Put/seqid=0 2024-11-28T09:22:21,973 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:21,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:21,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:21,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:21,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:21,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:21,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:22,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742244_1420 (size=12301) 2024-11-28T09:22:22,011 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=549 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/142c41e4920d4bcc9ff35d363279f2a2 2024-11-28T09:22:22,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/00cc467266824e62ba7d56607af4e843 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843 2024-11-28T09:22:22,021 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843, entries=150, sequenceid=549, filesize=12.0 K 2024-11-28T09:22:22,022 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/856c45bd2b074dd19c5bf0958b3ff074 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074 2024-11-28T09:22:22,026 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074, entries=150, sequenceid=549, filesize=12.0 K 2024-11-28T09:22:22,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/142c41e4920d4bcc9ff35d363279f2a2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2 2024-11-28T09:22:22,030 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2, entries=150, sequenceid=549, filesize=12.0 K 2024-11-28T09:22:22,031 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 0ad9609950a045418498b830dd929869 in 618ms, sequenceid=549, compaction requested=false 2024-11-28T09:22:22,031 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,128 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:22,129 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:22,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 0ad9609950a045418498b830dd929869 2024-11-28T09:22:22,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,130 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:22,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,130 DEBUG [Thread-1362 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17b6adc5 to 127.0.0.1:53251 2024-11-28T09:22:22,131 DEBUG [Thread-1362 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:22,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,130 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,143 DEBUG [Thread-1364 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x669e1999 to 127.0.0.1:53251 2024-11-28T09:22:22,143 DEBUG [Thread-1364 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:22,152 DEBUG [Thread-1370 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4dfb20f6 to 127.0.0.1:53251 2024-11-28T09:22:22,152 DEBUG [Thread-1370 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:22,155 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/7af86d58eb94443ab0524d5e8e0e6e6a is 50, key is test_row_0/A:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742245_1421 (size=12301) 2024-11-28T09:22:22,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/7af86d58eb94443ab0524d5e8e0e6e6a 2024-11-28T09:22:22,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/25bf92662b6648ddbd392187015ea5f7 is 50, key is test_row_0/B:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742246_1422 (size=12301) 2024-11-28T09:22:22,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/25bf92662b6648ddbd392187015ea5f7 2024-11-28T09:22:22,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/5d75520fd1074f229c58d24d6d12ae88 is 50, key is test_row_0/C:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,283 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
363d8d38a970,33819,1732785660637 2024-11-28T09:22:22,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:22,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:22,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:22,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:22,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742247_1423 (size=12301) 2024-11-28T09:22:22,438 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:22,439 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. as already flushing 2024-11-28T09:22:22,439 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,439 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:22,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,591 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:22,592 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:22,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
as already flushing 2024-11-28T09:22:22,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,592 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] handler.RSProcedureHandler(58): pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=92 java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=92 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:22,702 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=573 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/5d75520fd1074f229c58d24d6d12ae88 2024-11-28T09:22:22,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/7af86d58eb94443ab0524d5e8e0e6e6a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a 2024-11-28T09:22:22,718 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a, entries=150, sequenceid=573, filesize=12.0 K 2024-11-28T09:22:22,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/25bf92662b6648ddbd392187015ea5f7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7 2024-11-28T09:22:22,726 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7, entries=150, sequenceid=573, filesize=12.0 K 2024-11-28T09:22:22,727 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/5d75520fd1074f229c58d24d6d12ae88 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88 2024-11-28T09:22:22,732 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88, entries=150, sequenceid=573, filesize=12.0 K 2024-11-28T09:22:22,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=13.42 KB/13740 for 0ad9609950a045418498b830dd929869 in 604ms, sequenceid=573, compaction requested=true 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0ad9609950a045418498b830dd929869:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:22,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:22:22,735 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:22,735 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:22,737 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:22,737 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/A is initiating minor 
compaction (all files) 2024-11-28T09:22:22,738 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/A in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,738 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d98cf9fda27a487687e0e75bbb3c53fa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.3 K 2024-11-28T09:22:22,738 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:22,738 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/C is initiating minor compaction (all files) 2024-11-28T09:22:22,738 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d98cf9fda27a487687e0e75bbb3c53fa, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:22,738 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/C in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:22,738 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/32982aa720de44fca1d1029d23b46d91, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.3 K 2024-11-28T09:22:22,739 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 32982aa720de44fca1d1029d23b46d91, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:22,739 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 00cc467266824e62ba7d56607af4e843, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732785740777 2024-11-28T09:22:22,740 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 142c41e4920d4bcc9ff35d363279f2a2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732785740777 2024-11-28T09:22:22,740 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7af86d58eb94443ab0524d5e8e0e6e6a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732785741505 2024-11-28T09:22:22,740 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d75520fd1074f229c58d24d6d12ae88, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732785741505 2024-11-28T09:22:22,750 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:22,751 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=92 2024-11-28T09:22:22,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
2024-11-28T09:22:22,751 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:22,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:22,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/bdd5842e606a40b0b87483a401110076 is 50, key is test_row_1/A:col10/1732785742150/Put/seqid=0 2024-11-28T09:22:22,795 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#C#compaction#370 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:22,796 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/e6fbb86ae2ad4f52aee4594b4306a596 is 50, key is test_row_0/C:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,810 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#A#compaction#371 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:22,811 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2ebfdebea6994cfaae1d15d7e4214c5f is 50, key is test_row_0/A:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742248_1424 (size=9857) 2024-11-28T09:22:22,850 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/bdd5842e606a40b0b87483a401110076 2024-11-28T09:22:22,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742249_1425 (size=13663) 2024-11-28T09:22:22,873 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/e6fbb86ae2ad4f52aee4594b4306a596 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/e6fbb86ae2ad4f52aee4594b4306a596 2024-11-28T09:22:22,879 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/C of 0ad9609950a045418498b830dd929869 into e6fbb86ae2ad4f52aee4594b4306a596(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
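For context on the selection lines above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" and the ExploringCompactionPolicy ratio message): the eligibility, blocking, and ratio figures come from store-level configuration. A minimal sketch of the usual knobs follows, shown with their common defaults; the actual values for this run are assumptions, since they are not recorded in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // files needed before a minor compaction is considered
    conf.setInt("hbase.hstore.compaction.max", 10);       // upper bound on files merged in one compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection log line
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
    return conf;
  }
}
```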
2024-11-28T09:22:22,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,879 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/C, priority=13, startTime=1732785742735; duration=0sec 2024-11-28T09:22:22,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:22,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:C 2024-11-28T09:22:22,879 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:22,881 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:22,882 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 0ad9609950a045418498b830dd929869/B is initiating minor compaction (all files) 2024-11-28T09:22:22,882 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0ad9609950a045418498b830dd929869/B in TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,882 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d459baf8379744e3b6ce9cb210b8e37a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp, totalSize=37.3 K 2024-11-28T09:22:22,882 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d459baf8379744e3b6ce9cb210b8e37a, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=532, earliestPutTs=1732785739611 2024-11-28T09:22:22,884 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 856c45bd2b074dd19c5bf0958b3ff074, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=549, earliestPutTs=1732785740777 2024-11-28T09:22:22,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/545e1623ddad454d8bd983e2d22d48bb is 50, key is 
test_row_1/B:col10/1732785742150/Put/seqid=0 2024-11-28T09:22:22,885 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 25bf92662b6648ddbd392187015ea5f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=573, earliestPutTs=1732785741505 2024-11-28T09:22:22,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742250_1426 (size=13663) 2024-11-28T09:22:22,896 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/2ebfdebea6994cfaae1d15d7e4214c5f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2ebfdebea6994cfaae1d15d7e4214c5f 2024-11-28T09:22:22,901 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/A of 0ad9609950a045418498b830dd929869 into 2ebfdebea6994cfaae1d15d7e4214c5f(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:22,901 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,901 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/A, priority=13, startTime=1732785742735; duration=0sec 2024-11-28T09:22:22,901 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:22,901 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:A 2024-11-28T09:22:22,903 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0ad9609950a045418498b830dd929869#B#compaction#373 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:22,906 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/f6d4289c100b40e484ed3f6cb3b130e1 is 50, key is test_row_0/B:col10/1732785741505/Put/seqid=0 2024-11-28T09:22:22,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742251_1427 (size=9857) 2024-11-28T09:22:22,920 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/545e1623ddad454d8bd983e2d22d48bb 2024-11-28T09:22:22,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/13a8b28f7aa2481389e05bd575744d30 is 50, key is test_row_1/C:col10/1732785742150/Put/seqid=0 2024-11-28T09:22:22,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742252_1428 (size=13663) 2024-11-28T09:22:22,962 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/f6d4289c100b40e484ed3f6cb3b130e1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f6d4289c100b40e484ed3f6cb3b130e1 2024-11-28T09:22:22,967 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0ad9609950a045418498b830dd929869/B of 0ad9609950a045418498b830dd929869 into f6d4289c100b40e484ed3f6cb3b130e1(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:22,967 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,967 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869., storeName=0ad9609950a045418498b830dd929869/B, priority=13, startTime=1732785742735; duration=0sec 2024-11-28T09:22:22,968 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:22,968 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0ad9609950a045418498b830dd929869:B 2024-11-28T09:22:22,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742253_1429 (size=9857) 2024-11-28T09:22:22,975 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=578 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/13a8b28f7aa2481389e05bd575744d30 2024-11-28T09:22:22,979 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/bdd5842e606a40b0b87483a401110076 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/bdd5842e606a40b0b87483a401110076 2024-11-28T09:22:22,983 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/bdd5842e606a40b0b87483a401110076, entries=100, sequenceid=578, filesize=9.6 K 2024-11-28T09:22:22,984 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/545e1623ddad454d8bd983e2d22d48bb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/545e1623ddad454d8bd983e2d22d48bb 2024-11-28T09:22:22,989 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/545e1623ddad454d8bd983e2d22d48bb, entries=100, sequenceid=578, filesize=9.6 K 2024-11-28T09:22:22,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/13a8b28f7aa2481389e05bd575744d30 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/13a8b28f7aa2481389e05bd575744d30 2024-11-28T09:22:22,994 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/13a8b28f7aa2481389e05bd575744d30, entries=100, sequenceid=578, filesize=9.6 K 2024-11-28T09:22:22,995 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 0ad9609950a045418498b830dd929869 in 244ms, sequenceid=578, compaction requested=false 2024-11-28T09:22:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.HRegion(2538): Flush status journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:22,995 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=92}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=92 2024-11-28T09:22:22,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=92 2024-11-28T09:22:22,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-28T09:22:22,998 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8180 sec 2024-11-28T09:22:23,000 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=91, table=TestAcidGuarantees in 1.8240 sec 2024-11-28T09:22:23,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=91 2024-11-28T09:22:23,291 INFO [Thread-1372 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 91 completed 2024-11-28T09:22:23,777 DEBUG [Thread-1368 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4ec09297 to 127.0.0.1:53251 2024-11-28T09:22:23,777 DEBUG [Thread-1368 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:23,792 DEBUG [Thread-1366 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72aa9ee5 to 127.0.0.1:53251 2024-11-28T09:22:23,792 DEBUG [Thread-1366 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:23,792 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-28T09:22:23,792 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 146 2024-11-28T09:22:23,792 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 150 2024-11-28T09:22:23,792 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 19 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 30 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 111 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4430 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4263 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4297 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4488 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4263 2024-11-28T09:22:23,793 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T09:22:23,793 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:22:23,793 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f66057f to 127.0.0.1:53251 2024-11-28T09:22:23,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:23,795 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T09:22:23,795 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T09:22:23,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:23,799 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785743799"}]},"ts":"1732785743799"} 2024-11-28T09:22:23,800 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:22:23,802 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:22:23,803 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:22:23,804 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, UNASSIGN}] 2024-11-28T09:22:23,804 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, UNASSIGN 2024-11-28T09:22:23,805 INFO [PEWorker-4 {}] 
assignment.RegionStateStore(202): pid=95 updating hbase:meta row=0ad9609950a045418498b830dd929869, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:23,806 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:22:23,806 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:23,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:23,957 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:23,958 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 0ad9609950a045418498b830dd929869 2024-11-28T09:22:23,958 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:22:23,958 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 0ad9609950a045418498b830dd929869, disabling compactions & flushes 2024-11-28T09:22:23,958 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:23,958 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:23,958 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. after waiting 0 ms 2024-11-28T09:22:23,958 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 
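(For reference: the flush of TestAcidGuarantees completed above as FlushTableProcedure pid=91, and the disable now in flight is DisableTableProcedure pid=93. The client-side calls that drive those two master procedures would look roughly like the sketch below; this is a minimal illustration assuming the cluster's hbase-site.xml/ZooKeeper settings are on the classpath, not the test tool's own code, and the class name is made up for the example.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndDisableSketch {
  public static void main(String[] args) throws Exception {
    // Assumes cluster connection settings (ZooKeeper quorum etc.) are available on the classpath.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Client-side counterpart of the FlushTableProcedure (pid=91) logged above;
      // the call waits until the master reports the flush procedure as done.
      admin.flush(table);
      // Client-side counterpart of the DisableTableProcedure (pid=93): the master
      // marks the table DISABLING and schedules the region close seen below.
      admin.disableTable(table);
    }
  }
}
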
2024-11-28T09:22:23,959 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(2837): Flushing 0ad9609950a045418498b830dd929869 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=A 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=B 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0ad9609950a045418498b830dd929869, store=C 2024-11-28T09:22:23,959 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:23,965 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/0c93be93e118480790f7105bd0ff6735 is 50, key is test_row_0/A:col10/1732785743791/Put/seqid=0 2024-11-28T09:22:23,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742254_1430 (size=7415) 2024-11-28T09:22:24,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:24,395 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/0c93be93e118480790f7105bd0ff6735 2024-11-28T09:22:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:24,406 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/8c2d5ea3084c43669ccfd6155cc20f84 is 50, key is test_row_0/B:col10/1732785743791/Put/seqid=0 2024-11-28T09:22:24,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742255_1431 (size=7415) 2024-11-28T09:22:24,454 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 
{event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/8c2d5ea3084c43669ccfd6155cc20f84 2024-11-28T09:22:24,471 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a15dba0d064340caba3b242dc01a2eec is 50, key is test_row_0/C:col10/1732785743791/Put/seqid=0 2024-11-28T09:22:24,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742256_1432 (size=7415) 2024-11-28T09:22:24,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:24,904 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=586 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a15dba0d064340caba3b242dc01a2eec 2024-11-28T09:22:24,909 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/A/0c93be93e118480790f7105bd0ff6735 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/0c93be93e118480790f7105bd0ff6735 2024-11-28T09:22:24,912 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/0c93be93e118480790f7105bd0ff6735, entries=50, sequenceid=586, filesize=7.2 K 2024-11-28T09:22:24,912 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/B/8c2d5ea3084c43669ccfd6155cc20f84 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8c2d5ea3084c43669ccfd6155cc20f84 2024-11-28T09:22:24,916 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8c2d5ea3084c43669ccfd6155cc20f84, entries=50, sequenceid=586, filesize=7.2 K 2024-11-28T09:22:24,917 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/.tmp/C/a15dba0d064340caba3b242dc01a2eec as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a15dba0d064340caba3b242dc01a2eec 2024-11-28T09:22:24,921 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a15dba0d064340caba3b242dc01a2eec, entries=50, sequenceid=586, filesize=7.2 K 2024-11-28T09:22:24,921 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for 0ad9609950a045418498b830dd929869 in 963ms, sequenceid=586, compaction requested=true 2024-11-28T09:22:24,922 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/42cb03a9f89e4a58829b79da7f541aff, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/16a9bfcdd6124e1980c7a6a70a6b1fc0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5a4a3b223a1044adbe744dcadd0f2370, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/55d60a3ae77e4356bb77567623fcd4f8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/b7af2c0b90b94516bebc9523c421f1bb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/734abe7c1158428991505d6d4268d530, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7436708ef4ed4b5d945809b2da0df232, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ddb193e84dc64cbeba447aa63aa1761d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e414b2e3af3845f2b32092d3156157db, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/beafa48671ec4afb97a85e2f71e79929, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d98cf9fda27a487687e0e75bbb3c53fa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a] to archive 2024-11-28T09:22:24,924 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
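(The archive step that follows moves each compacted store file from the table's data directory to a parallel location under archive/, preserving the table/region/column-family layout. The sketch below only illustrates that path mapping with the plain Hadoop FileSystem API; it is not HBase's HFileArchiver implementation, and the file shown is just the first A-family file from the list above.)

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchivePathSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33549"), conf);

    Path root = new Path("/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532");
    // A compacted store file under the region's 'A' column family (taken from the log above).
    Path dataFile = new Path(root,
        "data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea");
    // The archive location mirrors the same table/region/family layout under archive/.
    Path archiveFile = new Path(root,
        "archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/"
            + dataFile.getName());

    fs.mkdirs(archiveFile.getParent()); // ensure archive/<table>/<region>/A exists
    fs.rename(dataFile, archiveFile);   // move, matching the "Archived from FileableStoreFile" entries below
  }
}
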
2024-11-28T09:22:24,926 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/675b60891b534886a24e8a9762d211ea 2024-11-28T09:22:24,930 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5f9446a79bca454e92dc8822f5d12911 2024-11-28T09:22:24,932 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/42cb03a9f89e4a58829b79da7f541aff to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/42cb03a9f89e4a58829b79da7f541aff 2024-11-28T09:22:24,934 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/cb877df785dd438aa7ebb9fa0ee7b34f 2024-11-28T09:22:24,936 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/f255eb4b525f4ba19001249f045636c4 2024-11-28T09:22:24,938 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/aef9b9acca704228ae05a3416c02af30 2024-11-28T09:22:24,940 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/16a9bfcdd6124e1980c7a6a70a6b1fc0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/16a9bfcdd6124e1980c7a6a70a6b1fc0 2024-11-28T09:22:24,941 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2cbc3d42bc2147a89a21abd7719b0e9f 2024-11-28T09:22:24,942 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/424d140980b04ff191b26863e6f5df87 2024-11-28T09:22:24,944 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5a4a3b223a1044adbe744dcadd0f2370 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/5a4a3b223a1044adbe744dcadd0f2370 2024-11-28T09:22:24,945 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/78112ce154f040fd9e3e1f443f76e08d 2024-11-28T09:22:24,946 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/93ad89726bcb4300a7123ea0b973b7ac 2024-11-28T09:22:24,948 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/36d116bb52114d908999194b9cf3dea1 2024-11-28T09:22:24,950 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/55d60a3ae77e4356bb77567623fcd4f8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/55d60a3ae77e4356bb77567623fcd4f8 2024-11-28T09:22:24,951 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/abdbdc02e8004d299c06f24a2298d27f 2024-11-28T09:22:24,952 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e87a4dff73df41fba87662a447a3cff3 2024-11-28T09:22:24,954 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/b7af2c0b90b94516bebc9523c421f1bb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/b7af2c0b90b94516bebc9523c421f1bb 2024-11-28T09:22:24,955 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/07533d667578455da841b163e49a5098 2024-11-28T09:22:24,956 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ba1ee478c2544492a7ebcaa6f3add6c7 2024-11-28T09:22:24,958 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d19c5b1806e14641b40584f192ede476 2024-11-28T09:22:24,959 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/734abe7c1158428991505d6d4268d530 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/734abe7c1158428991505d6d4268d530 2024-11-28T09:22:24,961 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/33b4a4fe303543159423a9c80b496cc7 2024-11-28T09:22:24,962 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7436708ef4ed4b5d945809b2da0df232 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7436708ef4ed4b5d945809b2da0df232 2024-11-28T09:22:24,963 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a760833c19e04816a745bcf909b85799 2024-11-28T09:22:24,965 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/3f5e7bf7aa8845d7b37200ce45870c69 2024-11-28T09:22:24,966 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ddb193e84dc64cbeba447aa63aa1761d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/ddb193e84dc64cbeba447aa63aa1761d 2024-11-28T09:22:24,967 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2105ced845bd4d15a1396c4d4bf44bab 2024-11-28T09:22:24,969 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/553835f43d5742ef8ef8cdcb55405b05 2024-11-28T09:22:24,970 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cc3dca00d2d4fd9a6e3f8eb95a1eb95 2024-11-28T09:22:24,972 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e414b2e3af3845f2b32092d3156157db to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e414b2e3af3845f2b32092d3156157db 2024-11-28T09:22:24,976 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/1cb5c373087545fc8558231813fcb1bd 2024-11-28T09:22:24,977 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/746661b693174a64876fb98985dfd950 2024-11-28T09:22:24,979 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/4e45d21289dc4263aa23838c4b697a15 2024-11-28T09:22:24,980 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/beafa48671ec4afb97a85e2f71e79929 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/beafa48671ec4afb97a85e2f71e79929 2024-11-28T09:22:24,981 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/c0d466f910f4467783cd60b8a25b9b51 2024-11-28T09:22:24,982 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/e8b9441590cf40bdbcb528598ab048c9 2024-11-28T09:22:24,983 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d98cf9fda27a487687e0e75bbb3c53fa to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/d98cf9fda27a487687e0e75bbb3c53fa 2024-11-28T09:22:24,985 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/a0845ed2c60f43b8a706f13a93cfd82d 2024-11-28T09:22:24,986 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/00cc467266824e62ba7d56607af4e843 2024-11-28T09:22:24,987 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/7af86d58eb94443ab0524d5e8e0e6e6a 2024-11-28T09:22:24,989 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/602bef74cfc04108bebe4b16bc59e4cc, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/1bf26449135f43eb82eed2d2f79adfba, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/7b003e8864994048bddf11fd2bb0196a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/19625573a5f8440d8608087983437e86, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/745ec3c8afae4a0babf52ea19d1b2ca4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/a7fc82142cf9408e898d72dc7a78af84, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8bde5bb1551d4f2cae7f3831a5903d2f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b6478d64f1f04c45a2ff53a3d08bf3c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5487a2e5200c4d1cbdd3ce984eec2f30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/febb343dc0e44f97b5239c130213a473, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d459baf8379744e3b6ce9cb210b8e37a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7] to archive 2024-11-28T09:22:24,990 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:22:24,999 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5aa7972ff7a44451922f7cf044a63a9d 2024-11-28T09:22:25,004 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e909d4f92e5042fe8cdf89a20162b580 2024-11-28T09:22:25,005 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/602bef74cfc04108bebe4b16bc59e4cc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/602bef74cfc04108bebe4b16bc59e4cc 2024-11-28T09:22:25,006 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f9479b2fbf3c4722b216b8190393aa5f 2024-11-28T09:22:25,008 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/330c9c9716d34458b951df4bbb700357 2024-11-28T09:22:25,009 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/1bf26449135f43eb82eed2d2f79adfba to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/1bf26449135f43eb82eed2d2f79adfba 2024-11-28T09:22:25,010 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ec9e224603aa47dd98acae6fe22ca709 2024-11-28T09:22:25,012 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/93392686e147409c81a2aecbaa1e082f 2024-11-28T09:22:25,013 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/7b003e8864994048bddf11fd2bb0196a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/7b003e8864994048bddf11fd2bb0196a 2024-11-28T09:22:25,014 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/154260b9c72a4e8196feff8f36077a62 2024-11-28T09:22:25,016 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/552729a8a77d420da9d4e941165be690 2024-11-28T09:22:25,017 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/305c39fba3be447891c467540e7d22d8 2024-11-28T09:22:25,018 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/19625573a5f8440d8608087983437e86 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/19625573a5f8440d8608087983437e86 2024-11-28T09:22:25,020 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d121da5913d44600ab5e6e8ce3b734d8 2024-11-28T09:22:25,021 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5f18b2b3dde64ad6b95917779ce4502d 2024-11-28T09:22:25,022 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/745ec3c8afae4a0babf52ea19d1b2ca4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/745ec3c8afae4a0babf52ea19d1b2ca4 2024-11-28T09:22:25,024 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d9a41a27c61c43649373df255bfa676d 2024-11-28T09:22:25,028 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/6fb987abdc904cdcb5ecf67ec5ce4268 2024-11-28T09:22:25,030 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b12a95c4f8354e4f90451ec326f3da70 2024-11-28T09:22:25,032 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/a7fc82142cf9408e898d72dc7a78af84 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/a7fc82142cf9408e898d72dc7a78af84 2024-11-28T09:22:25,033 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/cbaf09dcf1eb4cc7906c142bb84a8dad 2024-11-28T09:22:25,035 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ab6aeafb7ed746199c01f07921539f8a 2024-11-28T09:22:25,037 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8bde5bb1551d4f2cae7f3831a5903d2f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8bde5bb1551d4f2cae7f3831a5903d2f 2024-11-28T09:22:25,038 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/382987eecedc4f5e8ce3de66d5771eca 2024-11-28T09:22:25,040 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/28e0da7b97a6402d9a9abeb94443342a 2024-11-28T09:22:25,041 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b6478d64f1f04c45a2ff53a3d08bf3c7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/b6478d64f1f04c45a2ff53a3d08bf3c7 2024-11-28T09:22:25,043 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e280f892d5084cd1a8d0b8b45e39b9e4 2024-11-28T09:22:25,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/03117742c21a42dbb2c1deeb699171ab 2024-11-28T09:22:25,045 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/875876e8e5e942c2be1cf1bca4e23edd 2024-11-28T09:22:25,046 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5487a2e5200c4d1cbdd3ce984eec2f30 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/5487a2e5200c4d1cbdd3ce984eec2f30 2024-11-28T09:22:25,047 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/06c77337be05470693be97b9d9f20171 2024-11-28T09:22:25,048 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/e6eb3e89db1d47df8fd94e46d7bef978 2024-11-28T09:22:25,051 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/febb343dc0e44f97b5239c130213a473 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/febb343dc0e44f97b5239c130213a473 2024-11-28T09:22:25,052 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/ce5e97e1e9e64d99b47b0aa0663cbc6a 2024-11-28T09:22:25,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/46383e659f7e407f9f1d514f5edceb61 2024-11-28T09:22:25,056 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/266a0357d3a94f3ea721e47305afd6c3 2024-11-28T09:22:25,057 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d459baf8379744e3b6ce9cb210b8e37a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/d459baf8379744e3b6ce9cb210b8e37a 2024-11-28T09:22:25,059 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/fc2bd06adfa04152acd25c947b455a93 2024-11-28T09:22:25,060 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/856c45bd2b074dd19c5bf0958b3ff074 2024-11-28T09:22:25,061 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/25bf92662b6648ddbd392187015ea5f7 2024-11-28T09:22:25,062 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ebfce1014db04b60a7fb3decd876fc5c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4b69419dee204a189c25e7584117aa1e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/84ef6f3bb14245f6932191c3c9594901, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/be3bc9641ea5492289b7e95b777a988f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b1f2fcb49b0d4837aaf8d5ac26a21c95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a571ec66d3a34ceb867c1d3d0b4ef012, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/48143c8041da43508f439891189a9b60, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/927472b689f94b53bf2783db4b20a3fb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ce1fc47482b04aee9d1257c4ac0b74b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/f65b94d7c2974570bd04ad1f00fc6e97, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/32982aa720de44fca1d1029d23b46d91, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88] to archive 2024-11-28T09:22:25,063 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
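Every archive line in this stretch of the log records the same rename pattern: a compacted store file under data/default/&lt;table&gt;/&lt;region&gt;/&lt;family&gt;/ is moved to the parallel location under archive/. The standalone sketch below (plain Java, illustrative only, not HBase's own HFileArchiver code) just reproduces that path mapping; the root directory and file name are copied from the C-family entries that follow.

// Illustrative only: derives the archive location that the HFileArchiver
// lines in this log report, by re-rooting the data/ subtree under archive/.
public class ArchivePathSketch {

    /** Maps &lt;root&gt;/data/&lt;ns&gt;/&lt;table&gt;/&lt;region&gt;/&lt;cf&gt;/&lt;file&gt; to &lt;root&gt;/archive/data/... */
    static String toArchivePath(String rootDir, String storeFilePath) {
        String dataPrefix = rootDir + "/data/";
        if (!storeFilePath.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("not under " + dataPrefix + ": " + storeFilePath);
        }
        // Keep the namespace/table/region/family/file suffix, re-rooted under archive/data/.
        return rootDir + "/archive/data/" + storeFilePath.substring(dataPrefix.length());
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532";
        String src = root + "/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869"
                + "/C/11e9418a3168477dad67d93e2b38b674";
        // Prints the same destination the first C-family archive entry below reports.
        System.out.println(toArchivePath(root, src));
    }
}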
2024-11-28T09:22:25,065 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/11e9418a3168477dad67d93e2b38b674 2024-11-28T09:22:25,066 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/43ad14a09ef24684aacf717f307bd2a0 2024-11-28T09:22:25,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ebfce1014db04b60a7fb3decd876fc5c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ebfce1014db04b60a7fb3decd876fc5c 2024-11-28T09:22:25,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/56935dcd958e461c856d620ff2bf2aa8 2024-11-28T09:22:25,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/08ad9ed33b13488ba0fc21b6d37da973 2024-11-28T09:22:25,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4b69419dee204a189c25e7584117aa1e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4b69419dee204a189c25e7584117aa1e 2024-11-28T09:22:25,072 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/65b20c2379a348588d46062bb9801e2d 2024-11-28T09:22:25,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d5c3bb0cb7a949dfa2cc054ac09e8493 2024-11-28T09:22:25,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/84ef6f3bb14245f6932191c3c9594901 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/84ef6f3bb14245f6932191c3c9594901 2024-11-28T09:22:25,090 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/d7ebf9da0c8e41e4a55381822bf2f984 2024-11-28T09:22:25,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/7593e57fe5824f70a775212d804a1273 2024-11-28T09:22:25,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/714de24ef91f49bfbee10817c9e75f2e 2024-11-28T09:22:25,095 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/83ebbd03e6684d61816ab21ec061c413 2024-11-28T09:22:25,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/be3bc9641ea5492289b7e95b777a988f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/be3bc9641ea5492289b7e95b777a988f 2024-11-28T09:22:25,101 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cdd1bc576a014d2293c484fa70f97649 2024-11-28T09:22:25,102 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/0b7ac173f3754f5d8765b742ce4834d7 2024-11-28T09:22:25,104 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b1f2fcb49b0d4837aaf8d5ac26a21c95 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b1f2fcb49b0d4837aaf8d5ac26a21c95 2024-11-28T09:22:25,106 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/c3e1f18cf2a844d48e5c5eab36e6471e 2024-11-28T09:22:25,107 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/63c3ed31bed24ecea50cc43b7600f623 2024-11-28T09:22:25,114 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a571ec66d3a34ceb867c1d3d0b4ef012 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a571ec66d3a34ceb867c1d3d0b4ef012 2024-11-28T09:22:25,118 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/06c6f7f8412048deb639cf42b8df5861 2024-11-28T09:22:25,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4c182da125fb4801b7d19d2d942a420b 2024-11-28T09:22:25,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/48143c8041da43508f439891189a9b60 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/48143c8041da43508f439891189a9b60 2024-11-28T09:22:25,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1bb76c477831479fab5c030a114a205b 2024-11-28T09:22:25,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/1c3e2fa59b2c41398500398046e02170 2024-11-28T09:22:25,132 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/927472b689f94b53bf2783db4b20a3fb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/927472b689f94b53bf2783db4b20a3fb 2024-11-28T09:22:25,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/cd3a9300e682471b958edf9efc4f24c4 2024-11-28T09:22:25,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/b6f5f8c123f242bfb68a20fde5000b47 2024-11-28T09:22:25,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/008a8e2f1e8242bebf53d7f8800b2988 2024-11-28T09:22:25,141 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ce1fc47482b04aee9d1257c4ac0b74b7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/ce1fc47482b04aee9d1257c4ac0b74b7 2024-11-28T09:22:25,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/60213f43ef484492b81252d072ffd5c8 2024-11-28T09:22:25,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/04a3f1638a5e4f35976a44deac1e4f4a 2024-11-28T09:22:25,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/f65b94d7c2974570bd04ad1f00fc6e97 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/f65b94d7c2974570bd04ad1f00fc6e97 2024-11-28T09:22:25,150 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/79e1c946d04642f2befe4a4500692295 2024-11-28T09:22:25,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/8c23ed2b64f5422d9e64045835fda331 2024-11-28T09:22:25,153 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a5b7d922a6384b088c3a25609d55cc3a 2024-11-28T09:22:25,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/32982aa720de44fca1d1029d23b46d91 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/32982aa720de44fca1d1029d23b46d91 2024-11-28T09:22:25,157 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/4f76377fefe649c595732d99a1d245ca 2024-11-28T09:22:25,158 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/142c41e4920d4bcc9ff35d363279f2a2 2024-11-28T09:22:25,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/5d75520fd1074f229c58d24d6d12ae88 2024-11-28T09:22:25,189 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/recovered.edits/589.seqid, newMaxSeqId=589, maxSeqId=1 2024-11-28T09:22:25,190 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869. 2024-11-28T09:22:25,190 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 0ad9609950a045418498b830dd929869: 2024-11-28T09:22:25,193 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 0ad9609950a045418498b830dd929869 2024-11-28T09:22:25,193 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=0ad9609950a045418498b830dd929869, regionState=CLOSED 2024-11-28T09:22:25,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-28T09:22:25,198 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 0ad9609950a045418498b830dd929869, server=363d8d38a970,33819,1732785660637 in 1.3890 sec 2024-11-28T09:22:25,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-28T09:22:25,200 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0ad9609950a045418498b830dd929869, UNASSIGN in 1.3940 sec 2024-11-28T09:22:25,202 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=94, resume processing ppid=93 2024-11-28T09:22:25,202 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.3980 sec 2024-11-28T09:22:25,203 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785745203"}]},"ts":"1732785745203"} 2024-11-28T09:22:25,207 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:22:25,210 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:22:25,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.4150 sec 2024-11-28T09:22:25,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-28T09:22:25,904 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-28T09:22:25,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:22:25,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:25,906 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=97, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:25,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T09:22:25,908 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=97, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:25,913 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869 2024-11-28T09:22:25,925 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/recovered.edits] 2024-11-28T09:22:25,941 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/0c93be93e118480790f7105bd0ff6735 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/0c93be93e118480790f7105bd0ff6735 2024-11-28T09:22:25,944 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2ebfdebea6994cfaae1d15d7e4214c5f to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/2ebfdebea6994cfaae1d15d7e4214c5f 2024-11-28T09:22:25,945 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/bdd5842e606a40b0b87483a401110076 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/A/bdd5842e606a40b0b87483a401110076 2024-11-28T09:22:25,948 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/545e1623ddad454d8bd983e2d22d48bb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/545e1623ddad454d8bd983e2d22d48bb 2024-11-28T09:22:25,950 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8c2d5ea3084c43669ccfd6155cc20f84 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/8c2d5ea3084c43669ccfd6155cc20f84 2024-11-28T09:22:25,951 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f6d4289c100b40e484ed3f6cb3b130e1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/B/f6d4289c100b40e484ed3f6cb3b130e1 2024-11-28T09:22:25,955 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/13a8b28f7aa2481389e05bd575744d30 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/13a8b28f7aa2481389e05bd575744d30 2024-11-28T09:22:25,956 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a15dba0d064340caba3b242dc01a2eec to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/a15dba0d064340caba3b242dc01a2eec 2024-11-28T09:22:25,958 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/e6fbb86ae2ad4f52aee4594b4306a596 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/C/e6fbb86ae2ad4f52aee4594b4306a596 2024-11-28T09:22:25,961 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/recovered.edits/589.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869/recovered.edits/589.seqid 2024-11-28T09:22:25,961 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/0ad9609950a045418498b830dd929869 2024-11-28T09:22:25,961 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:22:25,965 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=97, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:25,992 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:22:25,994 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T09:22:25,995 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=97, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:25,995 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:22:25,996 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785745995"}]},"ts":"9223372036854775807"} 2024-11-28T09:22:26,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T09:22:26,016 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:22:26,016 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0ad9609950a045418498b830dd929869, NAME => 'TestAcidGuarantees,,1732785719388.0ad9609950a045418498b830dd929869.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:22:26,016 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
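The DISABLE (procId 93) and DELETE (procId 97) operations being finished above and below are driven from the client side through the HBase Admin API. A minimal sketch of that client sequence follows; it is an assumption about how the test issues the calls, not the test's actual code, but the two method calls shown are the standard Admin entry points for these procedures.

// Hedged client-side sketch: the calls that submit a DisableTableProcedure and a
// DeleteTableProcedure like the ones this log is executing.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Drives a DisableTableProcedure (cf. pid=93 above); blocks until it completes.
            admin.disableTable(table);
            // Drives a DeleteTableProcedure (cf. pid=97): region files are archived, the
            // region rows are removed from hbase:meta, and the table descriptor is deleted.
            admin.deleteTable(table);
        }
    }
}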
2024-11-28T09:22:26,016 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785746016"}]},"ts":"9223372036854775807"} 2024-11-28T09:22:26,018 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:22:26,022 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=97, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:26,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 118 msec 2024-11-28T09:22:26,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-28T09:22:26,210 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 97 completed 2024-11-28T09:22:26,222 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=241 (was 242), OpenFileDescriptor=458 (was 466), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=594 (was 492) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4377 (was 4370) - AvailableMemoryMB LEAK? - 2024-11-28T09:22:26,234 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=241, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=594, ProcessCount=11, AvailableMemoryMB=4376 2024-11-28T09:22:26,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-28T09:22:26,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:22:26,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:26,239 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:22:26,240 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:26,240 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 98 2024-11-28T09:22:26,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-28T09:22:26,240 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:22:26,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742257_1433 (size=960) 2024-11-28T09:22:26,266 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:22:26,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742258_1434 (size=53) 2024-11-28T09:22:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-28T09:22:26,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-28T09:22:26,686 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:26,686 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 142d3ce9e2b1d2945b57067dd3e37abc, disabling compactions & flushes 2024-11-28T09:22:26,686 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:26,686 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:26,686 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. after waiting 0 ms 2024-11-28T09:22:26,686 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:26,687 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:26,687 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:26,688 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:22:26,689 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785746688"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785746688"}]},"ts":"1732785746688"} 2024-11-28T09:22:26,690 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
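The create request logged above spells out the descriptor the test asks for: a BASIC compacting memstore set as table-level metadata, three otherwise-default column families A, B and C with a single version each, and the 131072-byte flush size that triggered the TableDescriptorChecker warning (the warning can come from either the descriptor or hbase.hregion.memstore.flush.size; it is shown here as a descriptor setting purely for illustration). A hedged sketch of building such a descriptor with the standard 2.x builder API; the values are copied from the log, the class and method names wrapping them are mine:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTableSketch {
  static TableDescriptor buildDescriptor() {
    TableDescriptorBuilder builder = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        // Table-level metadata from the log: in-memory compaction policy BASIC.
        .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
        // 131072 bytes is the MEMSTORE_FLUSHSIZE the checker warns is "too small".
        .setMemStoreFlushSize(131072L);
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes(family))
          .setMaxVersions(1)     // VERSIONS => '1'
          .setBlocksize(65536)   // BLOCKSIZE => '65536'
          .build());
    }
    return builder.build();
  }

  static void create(Admin admin) throws java.io.IOException {
    // Stored on the master as the pid=98 CreateTableProcedure in the log.
    admin.createTable(buildDescriptor());
  }
}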
2024-11-28T09:22:26,691 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:22:26,691 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785746691"}]},"ts":"1732785746691"} 2024-11-28T09:22:26,692 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:22:26,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, ASSIGN}] 2024-11-28T09:22:26,698 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, ASSIGN 2024-11-28T09:22:26,699 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:22:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-28T09:22:26,850 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:26,851 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; OpenRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:27,003 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,007 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
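The repeated "Checking to see if procedure is done pid=98" calls interleaved with the assignment above appear to be the client's HBaseAdmin$TableFuture polling the master until the CreateTableProcedure finishes. A hedged sketch of what that looks like from the caller's side; recent 2.x clients offer a single-argument createTableAsync(TableDescriptor), while older ones take an additional splitKeys argument, and the descriptor here is assumed to be the one sketched earlier:

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

class AsyncCreateSketch {
  static void createAndWait(Admin admin, TableDescriptor desc) throws Exception {
    // Submits the CreateTableProcedure and returns a future that polls the master
    // ("Checking to see if procedure is done pid=...").
    Future<Void> pending = admin.createTableAsync(desc);
    pending.get();   // returns at the log's "procId: ... completed" point
  }
}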
2024-11-28T09:22:27,008 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7285): Opening region: {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:22:27,008 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,008 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:27,008 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7327): checking encryption for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,008 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7330): checking classloading for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,010 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,011 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,011 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName A 2024-11-28T09:22:27,011 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,012 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,012 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,013 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,013 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName B 2024-11-28T09:22:27,013 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,014 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,014 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,015 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,015 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName C 2024-11-28T09:22:27,015 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,016 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,016 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,017 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,017 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,018 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:22:27,019 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1085): writing seq id for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,021 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:22:27,022 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1102): Opened 142d3ce9e2b1d2945b57067dd3e37abc; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65524616, jitterRate=-0.02360713481903076}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:22:27,023 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1001): Region open journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:27,023 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., pid=100, masterSystemTime=1732785747003 2024-11-28T09:22:27,025 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,025 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
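The FlushLargeStoresPolicy line above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the per-family flush threshold falls back to the region flush heap size divided by the number of families (flushSizeLowerBound=16777216, i.e. 16 MB, in this run). If a test or operator wanted an explicit bound instead, it can be supplied in the descriptor; a small, purely illustrative sketch, where the 8 MB value is an arbitrary example and not taken from the log:

import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

class FlushBoundSketch {
  static TableDescriptor withExplicitFlushLowerBound(TableDescriptor current) {
    return TableDescriptorBuilder.newBuilder(current)
        // Key named in the log; the value here is an arbitrary 8 MB example.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                  String.valueOf(8L * 1024 * 1024))
        .build();
  }
}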
2024-11-28T09:22:27,025 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,027 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-28T09:22:27,028 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; OpenRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 in 175 msec 2024-11-28T09:22:27,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-28T09:22:27,029 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, ASSIGN in 331 msec 2024-11-28T09:22:27,030 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:22:27,030 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785747030"}]},"ts":"1732785747030"} 2024-11-28T09:22:27,031 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:22:27,037 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=98, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:22:27,038 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 801 msec 2024-11-28T09:22:27,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-11-28T09:22:27,346 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 98 completed 2024-11-28T09:22:27,348 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b976e1a to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1df61dc9 2024-11-28T09:22:27,383 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fe71801, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:27,385 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:27,387 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36110, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:27,388 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:22:27,390 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52228, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:22:27,392 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T09:22:27,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:22:27,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=101, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742259_1435 (size=996) 2024-11-28T09:22:27,435 DEBUG [PEWorker-4 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T09:22:27,435 INFO [PEWorker-4 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T09:22:27,437 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:22:27,440 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, REOPEN/MOVE}] 2024-11-28T09:22:27,441 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, REOPEN/MOVE 2024-11-28T09:22:27,442 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,443 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:22:27,443 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=104, ppid=103, state=RUNNABLE; CloseRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:27,594 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,595 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(124): Close 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,595 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:22:27,595 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1681): Closing 142d3ce9e2b1d2945b57067dd3e37abc, disabling compactions & flushes 2024-11-28T09:22:27,595 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,595 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,595 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. after waiting 0 ms 2024-11-28T09:22:27,595 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
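The modify-table request above changes only family A: it becomes a MOB family (IS_MOB => 'true') with MOB_THRESHOLD => '4', so cell values larger than 4 bytes are written as MOB files rather than inline, which is what testMobScanAtomicity exercises. The ReopenTableRegionsProcedure close/open pair that follows is how the region picks up the new descriptor. A minimal sketch of issuing the same kind of change through the Admin API; the builder calls are standard 2.x methods, the surrounding class is illustrative:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

class EnableMobOnFamilyA {
  static void enableMob(Admin admin) throws java.io.IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    TableDescriptor current = admin.getDescriptor(table);
    ColumnFamilyDescriptor a = current.getColumnFamily(Bytes.toBytes("A"));
    ColumnFamilyDescriptor mobA = ColumnFamilyDescriptorBuilder.newBuilder(a)
        .setMobEnabled(true)    // IS_MOB => 'true'
        .setMobThreshold(4L)    // MOB_THRESHOLD => '4' (bytes)
        .build();
    TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
        .modifyColumnFamily(mobA)
        .build();
    // Triggers ModifyTableProcedure plus ReopenTableRegionsProcedure, as in the log (pid=101..105).
    admin.modifyTable(modified);
  }
}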
2024-11-28T09:22:27,599 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T09:22:27,600 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,600 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegion(1635): Region close journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:27,600 WARN [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] regionserver.HRegionServer(3786): Not adding moved region record: 142d3ce9e2b1d2945b57067dd3e37abc to self. 2024-11-28T09:22:27,601 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=104}] handler.UnassignRegionHandler(170): Closed 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,603 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=CLOSED 2024-11-28T09:22:27,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=104, resume processing ppid=103 2024-11-28T09:22:27,605 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, ppid=103, state=SUCCESS; CloseRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 in 161 msec 2024-11-28T09:22:27,606 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=103, ppid=102, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, REOPEN/MOVE; state=CLOSED, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=true 2024-11-28T09:22:27,756 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,759 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=103, state=RUNNABLE; OpenRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:27,911 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,914 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:27,914 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7285): Opening region: {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:22:27,915 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,915 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:27,915 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7327): checking encryption for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,915 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(7330): checking classloading for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,922 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,925 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,925 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName A 2024-11-28T09:22:27,928 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,928 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,929 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,930 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,930 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName B 2024-11-28T09:22:27,930 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,930 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,931 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,931 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:27,931 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 142d3ce9e2b1d2945b57067dd3e37abc columnFamilyName C 2024-11-28T09:22:27,931 DEBUG [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:27,932 INFO [StoreOpener-142d3ce9e2b1d2945b57067dd3e37abc-1 {}] regionserver.HStore(327): Store=142d3ce9e2b1d2945b57067dd3e37abc/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:27,932 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,933 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,934 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,937 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:22:27,939 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1085): writing seq id for 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:27,942 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1102): Opened 142d3ce9e2b1d2945b57067dd3e37abc; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64000798, jitterRate=-0.04631379246711731}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:22:27,942 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegion(1001): Region open journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:27,943 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., pid=105, masterSystemTime=1732785747911 2024-11-28T09:22:27,946 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:27,946 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=105}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
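Once the region above finishes reopening with the modified descriptor, the MOB settings on family A are visible to any client. A short, purely illustrative sketch of double-checking that from the client side; it is not part of the test itself:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.util.Bytes;

class VerifyMobDescriptor {
  static boolean familyAIsMob(Admin admin) throws java.io.IOException {
    ColumnFamilyDescriptor a = admin.getDescriptor(TableName.valueOf("TestAcidGuarantees"))
        .getColumnFamily(Bytes.toBytes("A"));
    // Expect true and 4 after the ModifyTableProcedure above.
    return a.isMobEnabled() && a.getMobThreshold() == 4L;
  }
}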
2024-11-28T09:22:27,946 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=103 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=OPEN, openSeqNum=5, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:27,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=103 2024-11-28T09:22:27,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=103, state=SUCCESS; OpenRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 in 189 msec 2024-11-28T09:22:27,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-28T09:22:27,951 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, REOPEN/MOVE in 509 msec 2024-11-28T09:22:27,953 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-11-28T09:22:27,954 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 514 msec 2024-11-28T09:22:27,957 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 561 msec 2024-11-28T09:22:27,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=101 2024-11-28T09:22:27,961 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b82ba2a to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3637e4c6 2024-11-28T09:22:27,985 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51f7d511, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:27,986 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7b6cf8cb to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72f422b4 2024-11-28T09:22:28,004 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1dc42ea6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,005 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7ec15031 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2df33cdf 2024-11-28T09:22:28,019 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117e86d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,020 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 
0x3dd5b441 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@9f472e0 2024-11-28T09:22:28,025 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6cd96549, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,026 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3c336ea4 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@167a78b0 2024-11-28T09:22:28,033 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31aea41b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,034 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5aee939b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1e247aa1 2024-11-28T09:22:28,044 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@801ba40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,045 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f49665c to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2205f666 2024-11-28T09:22:28,052 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27539bdc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,053 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683f8469 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6584e9ce 2024-11-28T09:22:28,060 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e3203d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,061 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75e4d3d0 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@37ec8e3b 2024-11-28T09:22:28,072 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@798e7fd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,073 DEBUG 
[Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b308f62 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@787e5169 2024-11-28T09:22:28,113 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7284f16d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:28,136 DEBUG [hconnection-0x4188067e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,138 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36112, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:28,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:28,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:28,168 DEBUG [hconnection-0x285f5ed9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,169 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36124, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,188 DEBUG [hconnection-0x7034766d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,189 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,194 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785808194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,196 DEBUG [hconnection-0xc51fa15-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,197 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,199 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785808199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,201 DEBUG [hconnection-0x457d8e69-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,207 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-28T09:22:28,209 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:28,210 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36148, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:28,210 DEBUG [hconnection-0x5bd90366-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,211 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:28,211 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:28,212 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36160, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785808214, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,220 DEBUG [hconnection-0x38294973-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,221 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36174, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,222 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785808222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,237 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ff28590b9294e5bb3dbc215bcdabd34_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785748146/Put/seqid=0 2024-11-28T09:22:28,240 DEBUG [hconnection-0x65eb3dcb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,241 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36180, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,252 DEBUG [hconnection-0x4c9fb017-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,253 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36188, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,272 DEBUG [hconnection-0x1b5e2a64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:28,273 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:28,276 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785808276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,296 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785808296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,301 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785808300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742260_1436 (size=12154) 2024-11-28T09:22:28,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:28,315 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785808315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785808324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,362 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,363 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:28,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:28,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:28,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,363 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:28,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,382 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785808377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,498 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785808498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,503 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785808503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:28,517 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:28,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:28,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,518 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785808523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,526 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785808525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,585 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785808585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,670 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,670 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,671 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:28,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:28,703 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:28,708 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ff28590b9294e5bb3dbc215bcdabd34_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ff28590b9294e5bb3dbc215bcdabd34_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:28,716 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f07a8b5e93ff4deda21b8e465a99d477, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:28,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f07a8b5e93ff4deda21b8e465a99d477 is 175, key is test_row_0/A:col10/1732785748146/Put/seqid=0 2024-11-28T09:22:28,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742261_1437 (size=30955) 2024-11-28T09:22:28,769 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f07a8b5e93ff4deda21b8e465a99d477 2024-11-28T09:22:28,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785808801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785808808, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:28,823 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,824 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:28,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:28,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,824 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:28,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785808827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,848 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785808847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/909969ad3f9f44c791011cc764319ed2 is 50, key is test_row_0/B:col10/1732785748146/Put/seqid=0 2024-11-28T09:22:28,888 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:28,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785808888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742262_1438 (size=12001) 2024-11-28T09:22:28,977 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:28,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:28,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:28,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:28,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T09:22:29,135 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:29,135 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:29,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:29,136 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:29,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:29,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:29,288 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:29,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-28T09:22:29,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:29,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:29,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:29,289 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:29,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:29,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:29,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/909969ad3f9f44c791011cc764319ed2 2024-11-28T09:22:29,311 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:29,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785809305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:29,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:29,321 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:29,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785809318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:29,336 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:29,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785809330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:29,351 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/34028b9959c64cea9b800d9b9ae722aa is 50, key is test_row_0/C:col10/1732785748146/Put/seqid=0 2024-11-28T09:22:29,352 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:22:29,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785809350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:29,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742263_1439 (size=12001)
2024-11-28T09:22:29,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/34028b9959c64cea9b800d9b9ae722aa
2024-11-28T09:22:29,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f07a8b5e93ff4deda21b8e465a99d477 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477
2024-11-28T09:22:29,395 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477, entries=150, sequenceid=15, filesize=30.2 K
2024-11-28T09:22:29,396 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/909969ad3f9f44c791011cc764319ed2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2
2024-11-28T09:22:29,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2, entries=150, sequenceid=15, filesize=11.7 K
2024-11-28T09:22:29,402 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:22:29,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785809393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:29,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/34028b9959c64cea9b800d9b9ae722aa as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa
2024-11-28T09:22:29,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa, entries=150, sequenceid=15, filesize=11.7 K
2024-11-28T09:22:29,410 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 142d3ce9e2b1d2945b57067dd3e37abc in 1258ms, sequenceid=15, compaction requested=false
2024-11-28T09:22:29,411 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees'
2024-11-28T09:22:29,411 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc:
2024-11-28T09:22:29,443 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:22:29,443 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107
2024-11-28T09:22:29,443 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.
2024-11-28T09:22:29,443 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C
2024-11-28T09:22:29,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:29,491 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e215dfb9d5e84b93b45836be3511ef64_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785748187/Put/seqid=0
2024-11-28T09:22:29,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742264_1440 (size=12154)
2024-11-28T09:22:29,919 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:29,923 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e215dfb9d5e84b93b45836be3511ef64_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e215dfb9d5e84b93b45836be3511ef64_142d3ce9e2b1d2945b57067dd3e37abc
2024-11-28T09:22:29,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/fbff41a849984052b04b647d54f31255, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc]
2024-11-28T09:22:29,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/fbff41a849984052b04b647d54f31255 is 175, key is test_row_0/A:col10/1732785748187/Put/seqid=0
2024-11-28T09:22:29,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742265_1441 (size=30955)
2024-11-28T09:22:30,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106
2024-11-28T09:22:30,328 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing
2024-11-28T09:22:30,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc
2024-11-28T09:22:30,373 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/fbff41a849984052b04b647d54f31255
2024-11-28T09:22:30,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/88e7d440064c4b96bc12d1f2ffb0c169 is 50, key is test_row_0/B:col10/1732785748187/Put/seqid=0
2024-11-28T09:22:30,395 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785810387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,403 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785810390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,404 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785810395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785810395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,419 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785810404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742266_1442 (size=12001) 2024-11-28T09:22:30,446 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/88e7d440064c4b96bc12d1f2ffb0c169 2024-11-28T09:22:30,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/7309b2c8997c48808e7a634cf3a23b6c is 50, key is test_row_0/C:col10/1732785748187/Put/seqid=0 2024-11-28T09:22:30,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785810496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785810505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785810505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785810507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742267_1443 (size=12001) 2024-11-28T09:22:30,513 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/7309b2c8997c48808e7a634cf3a23b6c 2024-11-28T09:22:30,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/fbff41a849984052b04b647d54f31255 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255 2024-11-28T09:22:30,528 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255, entries=150, sequenceid=40, filesize=30.2 K 2024-11-28T09:22:30,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/88e7d440064c4b96bc12d1f2ffb0c169 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169 2024-11-28T09:22:30,533 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169, entries=150, sequenceid=40, filesize=11.7 K 2024-11-28T09:22:30,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/7309b2c8997c48808e7a634cf3a23b6c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c 2024-11-28T09:22:30,538 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c, entries=150, sequenceid=40, filesize=11.7 K 2024-11-28T09:22:30,539 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 142d3ce9e2b1d2945b57067dd3e37abc in 1095ms, sequenceid=40, compaction requested=false 2024-11-28T09:22:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:30,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-28T09:22:30,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-28T09:22:30,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-28T09:22:30,542 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3290 sec 2024-11-28T09:22:30,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 2.3370 sec 2024-11-28T09:22:30,716 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:30,717 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:30,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:30,745 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112866b71ad7ac144b1aa103736dc0cd9316_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:30,767 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T09:22:30,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742268_1444 (size=14594) 2024-11-28T09:22:30,791 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:30,795 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112866b71ad7ac144b1aa103736dc0cd9316_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112866b71ad7ac144b1aa103736dc0cd9316_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:30,796 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c8e86c4b7c984ffd975bcf2efd887784, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:30,797 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c8e86c4b7c984ffd975bcf2efd887784 is 175, key is test_row_0/A:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:30,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742269_1445 (size=39549) 2024-11-28T09:22:30,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785810814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785810814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785810815, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,842 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785810830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785810941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785810941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,949 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785810942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:30,950 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:30,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785810943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785811149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,161 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785811152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785811152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785811153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,232 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=53, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c8e86c4b7c984ffd975bcf2efd887784 2024-11-28T09:22:31,253 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/f2d7c81b54594d8e9f18e931ece1166d is 50, key is test_row_0/B:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:31,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742270_1446 (size=12001) 2024-11-28T09:22:31,314 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/f2d7c81b54594d8e9f18e931ece1166d 2024-11-28T09:22:31,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cc0cfa95b90040fdbe1d7e262693829b is 50, key is test_row_0/C:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:31,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742271_1447 (size=12001) 2024-11-28T09:22:31,376 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=53 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cc0cfa95b90040fdbe1d7e262693829b 2024-11-28T09:22:31,383 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c8e86c4b7c984ffd975bcf2efd887784 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784 2024-11-28T09:22:31,387 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784, entries=200, sequenceid=53, filesize=38.6 K 2024-11-28T09:22:31,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/f2d7c81b54594d8e9f18e931ece1166d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d 2024-11-28T09:22:31,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T09:22:31,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cc0cfa95b90040fdbe1d7e262693829b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b 2024-11-28T09:22:31,399 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b, entries=150, sequenceid=53, filesize=11.7 K 2024-11-28T09:22:31,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 142d3ce9e2b1d2945b57067dd3e37abc in 684ms, sequenceid=53, compaction requested=true 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:31,400 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 
compacting, 3 eligible, 16 blocking 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:31,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:31,400 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:31,401 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:31,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:31,402 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:31,402 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=99.1 K 2024-11-28T09:22:31,402 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:31,402 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:31,402 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:31,402 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784] 2024-11-28T09:22:31,402 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:31,402 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.2 K 2024-11-28T09:22:31,402 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 909969ad3f9f44c791011cc764319ed2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732785748146 2024-11-28T09:22:31,403 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 88e7d440064c4b96bc12d1f2ffb0c169, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732785748183 2024-11-28T09:22:31,403 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f2d7c81b54594d8e9f18e931ece1166d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750387 2024-11-28T09:22:31,410 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f07a8b5e93ff4deda21b8e465a99d477, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732785748146 2024-11-28T09:22:31,410 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fbff41a849984052b04b647d54f31255, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732785748183 2024-11-28T09:22:31,411 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8e86c4b7c984ffd975bcf2efd887784, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750386 2024-11-28T09:22:31,425 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#387 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:31,425 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/546b38a4c399464bb0981a273c8b80d6 is 50, key is test_row_0/B:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:31,436 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:31,458 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128797f4e6fc3274e809e33acde50e512e6_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:31,461 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128797f4e6fc3274e809e33acde50e512e6_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:31,461 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128797f4e6fc3274e809e33acde50e512e6_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:31,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:31,475 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:31,475 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:31,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742272_1448 (size=12104) 2024-11-28T09:22:31,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/546b38a4c399464bb0981a273c8b80d6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/546b38a4c399464bb0981a273c8b80d6 2024-11-28T09:22:31,491 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into 546b38a4c399464bb0981a273c8b80d6(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:31,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:31,491 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=13, startTime=1732785751400; duration=0sec 2024-11-28T09:22:31,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:31,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:31,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:31,492 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:31,492 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:31,492 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:31,492 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.2 K 2024-11-28T09:22:31,493 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 34028b9959c64cea9b800d9b9ae722aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1732785748146 2024-11-28T09:22:31,493 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7309b2c8997c48808e7a634cf3a23b6c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1732785748183 2024-11-28T09:22:31,493 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cc0cfa95b90040fdbe1d7e262693829b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750387 2024-11-28T09:22:31,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785811504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785811505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785811505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785811506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742273_1449 (size=4469) 2024-11-28T09:22:31,538 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#389 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:31,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/ee1b5b1ee1f749f9b0506a3985af4d2d is 50, key is test_row_0/C:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:31,551 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112890c8b116dce24af4b36574aed387c44e_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785751473/Put/seqid=0 2024-11-28T09:22:31,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742274_1450 (size=12104) 2024-11-28T09:22:31,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742275_1451 (size=14594) 2024-11-28T09:22:31,607 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:31,613 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112890c8b116dce24af4b36574aed387c44e_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112890c8b116dce24af4b36574aed387c44e_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:31,614 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8e63850c7286463da5a4e999f9bd18ee, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:31,615 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8e63850c7286463da5a4e999f9bd18ee is 175, key is test_row_0/A:col10/1732785751473/Put/seqid=0 2024-11-28T09:22:31,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742276_1452 (size=39549) 2024-11-28T09:22:31,638 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8e63850c7286463da5a4e999f9bd18ee 2024-11-28T09:22:31,640 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785811626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785811626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785811626, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785811630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,645 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7961d4033b0c43f8be5154936fbacb3f is 50, key is test_row_0/B:col10/1732785751473/Put/seqid=0 2024-11-28T09:22:31,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742277_1453 (size=12001) 2024-11-28T09:22:31,709 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7961d4033b0c43f8be5154936fbacb3f 2024-11-28T09:22:31,727 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d3a54be57f19461981e9637f94a2468e is 50, key is test_row_0/C:col10/1732785751473/Put/seqid=0 2024-11-28T09:22:31,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742278_1454 (size=12001) 2024-11-28T09:22:31,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785811842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785811843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,854 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785811843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785811843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:31,935 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#388 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:31,935 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c0b466dab6014952b381e78136dc6a32 is 175, key is test_row_0/A:col10/1732785750387/Put/seqid=0 2024-11-28T09:22:31,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742279_1455 (size=31058) 2024-11-28T09:22:31,990 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/ee1b5b1ee1f749f9b0506a3985af4d2d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/ee1b5b1ee1f749f9b0506a3985af4d2d 2024-11-28T09:22:31,997 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into ee1b5b1ee1f749f9b0506a3985af4d2d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
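The repeated RegionTooBusyException entries above come from the write path blocking once the region's memstore passes its blocking size, reported here as 512.0 K; HRegion.checkResources() is the throw site in every trace, and the client keeps retrying the Mutate calls until the flushes below free up space. That blocking size is normally the configured flush size multiplied by the block multiplier. A minimal configuration sketch of that relationship follows; the 128 K flush size and multiplier of 4 are illustrative assumptions chosen only because they reproduce a 512 K limit, not values read from this test run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBackpressureSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Flush a memstore once it reaches this many bytes (assumed 128 K for illustration).
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            // Block new writes once the memstore grows past flush.size * multiplier; at that
            // point puts fail with RegionTooBusyException, as seen in the traces above.
            conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                    * conf.getLong("hbase.hregion.memstore.block.multiplier", 0);
            System.out.println("Writes block above " + blockingLimit + " bytes"); // 524288 = 512.0 K
        }
    }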
2024-11-28T09:22:31,997 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:31,997 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=13, startTime=1732785751400; duration=0sec 2024-11-28T09:22:31,997 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:31,997 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:32,161 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d3a54be57f19461981e9637f94a2468e 2024-11-28T09:22:32,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8e63850c7286463da5a4e999f9bd18ee as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee 2024-11-28T09:22:32,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785812156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785812156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785812157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,171 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785812157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee, entries=200, sequenceid=78, filesize=38.6 K 2024-11-28T09:22:32,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7961d4033b0c43f8be5154936fbacb3f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f 2024-11-28T09:22:32,192 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T09:22:32,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d3a54be57f19461981e9637f94a2468e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e 2024-11-28T09:22:32,208 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e, entries=150, sequenceid=78, filesize=11.7 K 2024-11-28T09:22:32,209 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 142d3ce9e2b1d2945b57067dd3e37abc in 735ms, sequenceid=78, compaction requested=false 2024-11-28T09:22:32,210 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:32,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-28T09:22:32,315 INFO [Thread-1930 {}] 
client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-28T09:22:32,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:32,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-28T09:22:32,319 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:32,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:32,321 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:32,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:32,393 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c0b466dab6014952b381e78136dc6a32 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32 2024-11-28T09:22:32,400 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into c0b466dab6014952b381e78136dc6a32(size=30.3 K), total size for store is 69.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
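The compactions logged above (stores C and A of region 142d3ce9e2b1d2945b57067dd3e37abc) were selected automatically by the region server's CompactSplit threads. For comparison, a compaction can also be requested from a client through the Admin API; this is only a hedged sketch of that call path, not what triggered the compactions in this log, and the connection settings are assumed to come from an hbase-site.xml on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompactionSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Ask the cluster to major-compact every store of the table; the request is
                // queued and runs asynchronously on the region server, as in the entries above.
                admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }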
2024-11-28T09:22:32,400 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:32,400 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=13, startTime=1732785751400; duration=0sec 2024-11-28T09:22:32,400 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:32,400 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:32,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:32,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:32,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:32,474 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-28T09:22:32,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
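The FLUSH operations tracked here as procedures 106 and 108 are driven by client-side flush requests (the "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" entry): the master stores a FlushTableProcedure and dispatches a FlushRegionCallable to the region server, which in this case reports "NOT flushing ... as already flushing". A minimal sketch of issuing such a request with the standard Admin API, assuming default client configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a flush of all memstores of the table; the master tracks it as a
                // procedure, matching the "Checking to see if procedure is done pid=..." lines.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }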
2024-11-28T09:22:32,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112861cc2ceb5cdc48afa48c48d31b44cd71_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:32,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742280_1456 (size=14594) 2024-11-28T09:22:32,527 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:32,531 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112861cc2ceb5cdc48afa48c48d31b44cd71_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861cc2ceb5cdc48afa48c48d31b44cd71_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:32,532 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f6ad92541d9745e380ad0b4cb3591575, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f6ad92541d9745e380ad0b4cb3591575 is 175, key is test_row_0/A:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:32,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742281_1457 (size=39549) 2024-11-28T09:22:32,572 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f6ad92541d9745e380ad0b4cb3591575 2024-11-28T09:22:32,588 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/4ca81648963b408682ed9e76242222d4 is 50, key is test_row_0/B:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:32,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:32,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742282_1458 (size=12001) 2024-11-28T09:22:32,627 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,628 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-28T09:22:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:32,628 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,628 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,630 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/4ca81648963b408682ed9e76242222d4 2024-11-28T09:22:32,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:32,649 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0c37760fe8014ee493983c128709d3e2 is 50, key is test_row_0/C:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:32,692 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785812676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785812677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,696 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785812684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785812687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742283_1459 (size=12001) 2024-11-28T09:22:32,700 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0c37760fe8014ee493983c128709d3e2 2024-11-28T09:22:32,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785812688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f6ad92541d9745e380ad0b4cb3591575 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575 2024-11-28T09:22:32,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575, entries=200, sequenceid=93, filesize=38.6 K 2024-11-28T09:22:32,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/4ca81648963b408682ed9e76242222d4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4 2024-11-28T09:22:32,714 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T09:22:32,715 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0c37760fe8014ee493983c128709d3e2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2 2024-11-28T09:22:32,719 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2, entries=150, sequenceid=93, filesize=11.7 K 2024-11-28T09:22:32,720 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 
KB/144270 for 142d3ce9e2b1d2945b57067dd3e37abc in 277ms, sequenceid=93, compaction requested=true 2024-11-28T09:22:32,720 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:32,720 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:32,722 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 110156 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:32,722 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:32,722 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:32,723 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=107.6 K 2024-11-28T09:22:32,723 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:32,723 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575] 2024-11-28T09:22:32,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:32,723 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:32,723 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c0b466dab6014952b381e78136dc6a32, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750387 2024-11-28T09:22:32,723 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e63850c7286463da5a4e999f9bd18ee, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732785750750 2024-11-28T09:22:32,724 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:32,724 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6ad92541d9745e380ad0b4cb3591575, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:32,725 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:32,725 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:32,726 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:32,726 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/546b38a4c399464bb0981a273c8b80d6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.3 K 2024-11-28T09:22:32,726 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 546b38a4c399464bb0981a273c8b80d6, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750387 2024-11-28T09:22:32,727 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7961d4033b0c43f8be5154936fbacb3f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732785750800 2024-11-28T09:22:32,728 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4ca81648963b408682ed9e76242222d4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:32,735 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:32,736 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:32,737 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:32,747 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,758 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#397 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:32,759 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/edfdfd1bf3364618b5a3611780130a48 is 50, key is test_row_0/B:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:32,769 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128dcd2b0d5ad44449bbecafddd7b3566e8_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,771 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128dcd2b0d5ad44449bbecafddd7b3566e8_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,771 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128dcd2b0d5ad44449bbecafddd7b3566e8_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,781 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,782 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-28T09:22:32,782 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:32,782 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:32,783 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:32,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:32,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:32,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742285_1461 (size=4469) 2024-11-28T09:22:32,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742284_1460 (size=12207) 2024-11-28T09:22:32,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286ee33d7a911d48b0847546dec785a533_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785752647/Put/seqid=0 2024-11-28T09:22:32,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785812894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,918 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:32,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785812895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:32,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:32,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742286_1462 (size=12154) 2024-11-28T09:22:32,941 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:32,958 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286ee33d7a911d48b0847546dec785a533_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ee33d7a911d48b0847546dec785a533_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:32,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f91a499d276a4003a634dc506d93facd, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:32,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f91a499d276a4003a634dc506d93facd is 175, key is test_row_0/A:col10/1732785752647/Put/seqid=0 2024-11-28T09:22:33,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742287_1463 (size=30955) 2024-11-28T09:22:33,006 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=117, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f91a499d276a4003a634dc506d93facd 2024-11-28T09:22:33,027 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/5638a288bccb46b2b83e8cde211510cd is 50, key is test_row_0/B:col10/1732785752647/Put/seqid=0 2024-11-28T09:22:33,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785813019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785813019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742288_1464 (size=12001) 2024-11-28T09:22:33,095 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/5638a288bccb46b2b83e8cde211510cd 2024-11-28T09:22:33,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/907f7a5605db4fd4a05d8ddc1610d455 is 50, key is test_row_0/C:col10/1732785752647/Put/seqid=0 2024-11-28T09:22:33,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742289_1465 (size=12001) 2024-11-28T09:22:33,235 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#396 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:33,235 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/d576e47544724479abff7d83f7e68590 is 175, key is test_row_0/A:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:33,244 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/edfdfd1bf3364618b5a3611780130a48 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/edfdfd1bf3364618b5a3611780130a48 2024-11-28T09:22:33,248 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into edfdfd1bf3364618b5a3611780130a48(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:33,248 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:33,248 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=13, startTime=1732785752724; duration=0sec 2024-11-28T09:22:33,249 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:33,249 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:33,249 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:33,249 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:33,250 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:33,250 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:33,250 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/ee1b5b1ee1f749f9b0506a3985af4d2d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.3 K 2024-11-28T09:22:33,250 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ee1b5b1ee1f749f9b0506a3985af4d2d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=53, earliestPutTs=1732785750387 2024-11-28T09:22:33,251 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d3a54be57f19461981e9637f94a2468e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1732785750800 2024-11-28T09:22:33,251 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0c37760fe8014ee493983c128709d3e2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:33,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785813243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,266 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785813243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742290_1466 (size=31161) 2024-11-28T09:22:33,296 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/d576e47544724479abff7d83f7e68590 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590 2024-11-28T09:22:33,297 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#401 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:33,298 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/637e913db3834921bc81f7db702dd62f is 50, key is test_row_0/C:col10/1732785752441/Put/seqid=0 2024-11-28T09:22:33,301 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into d576e47544724479abff7d83f7e68590(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:33,301 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:33,301 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=13, startTime=1732785752720; duration=0sec 2024-11-28T09:22:33,302 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:33,302 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:33,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742291_1467 (size=12207) 2024-11-28T09:22:33,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:33,574 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/907f7a5605db4fd4a05d8ddc1610d455 2024-11-28T09:22:33,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785813575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/f91a499d276a4003a634dc506d93facd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd 2024-11-28T09:22:33,591 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd, entries=150, sequenceid=117, filesize=30.2 K 2024-11-28T09:22:33,592 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/5638a288bccb46b2b83e8cde211510cd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd 2024-11-28T09:22:33,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785813589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,600 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd, entries=150, sequenceid=117, filesize=11.7 K 2024-11-28T09:22:33,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/907f7a5605db4fd4a05d8ddc1610d455 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455 2024-11-28T09:22:33,614 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455, entries=150, sequenceid=117, filesize=11.7 K 2024-11-28T09:22:33,615 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 142d3ce9e2b1d2945b57067dd3e37abc in 833ms, sequenceid=117, compaction requested=false 2024-11-28T09:22:33,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:33,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:33,615 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-28T09:22:33,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-28T09:22:33,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-28T09:22:33,618 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2950 sec 2024-11-28T09:22:33,620 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 1.3010 sec 2024-11-28T09:22:33,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:33,708 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:33,709 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:33,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:33,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cbb59c52858a477ab4631a67ce2ef6f1_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:33,740 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/637e913db3834921bc81f7db702dd62f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/637e913db3834921bc81f7db702dd62f 2024-11-28T09:22:33,746 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into 637e913db3834921bc81f7db702dd62f(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:33,746 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:33,746 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=13, startTime=1732785752736; duration=0sec 2024-11-28T09:22:33,746 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:33,746 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:33,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742292_1468 (size=14644) 2024-11-28T09:22:33,768 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:33,774 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cbb59c52858a477ab4631a67ce2ef6f1_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cbb59c52858a477ab4631a67ce2ef6f1_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:33,775 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0c043ed9e79b4fdb96c5264ae8185931, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:33,776 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0c043ed9e79b4fdb96c5264ae8185931 is 175, key is test_row_0/A:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:33,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742293_1469 (size=39599) 2024-11-28T09:22:33,812 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=132, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0c043ed9e79b4fdb96c5264ae8185931 2024-11-28T09:22:33,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/00e0d34660104a85a6c1c13576666f73 is 50, key is test_row_0/B:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:33,825 WARN 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785813816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,826 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785813817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785813818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742294_1470 (size=12051) 2024-11-28T09:22:33,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785813927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785813928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:33,933 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:33,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785813928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785814083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785814104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785814134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785814136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785814136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,249 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/00e0d34660104a85a6c1c13576666f73 2024-11-28T09:22:34,274 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0fdb5d0e62e04065bab05d56402a2414 is 50, key is test_row_0/C:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:34,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742295_1471 (size=12051) 2024-11-28T09:22:34,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0fdb5d0e62e04065bab05d56402a2414 2024-11-28T09:22:34,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0c043ed9e79b4fdb96c5264ae8185931 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931 2024-11-28T09:22:34,332 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931, entries=200, sequenceid=132, filesize=38.7 K 2024-11-28T09:22:34,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/00e0d34660104a85a6c1c13576666f73 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73 2024-11-28T09:22:34,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T09:22:34,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0fdb5d0e62e04065bab05d56402a2414 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414 2024-11-28T09:22:34,356 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414, entries=150, sequenceid=132, filesize=11.8 K 2024-11-28T09:22:34,357 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 142d3ce9e2b1d2945b57067dd3e37abc in 649ms, sequenceid=132, compaction requested=true 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:34,357 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:34,357 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:34,357 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:34,358 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:34,358 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101715 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:34,358 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:34,358 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,359 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=99.3 K 2024-11-28T09:22:34,359 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,359 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931] 2024-11-28T09:22:34,359 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:34,359 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:34,359 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,359 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/edfdfd1bf3364618b5a3611780130a48, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.4 K 2024-11-28T09:22:34,359 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d576e47544724479abff7d83f7e68590, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:34,359 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting edfdfd1bf3364618b5a3611780130a48, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:34,360 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f91a499d276a4003a634dc506d93facd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785752647 2024-11-28T09:22:34,360 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5638a288bccb46b2b83e8cde211510cd, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785752647 2024-11-28T09:22:34,360 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0c043ed9e79b4fdb96c5264ae8185931, keycount=200, bloomtype=ROW, size=38.7 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:34,360 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 
00e0d34660104a85a6c1c13576666f73, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:34,378 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#405 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:34,379 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/376a3666bbfa40e68d40b0c944620255 is 50, key is test_row_0/B:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:34,389 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:34,412 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411280d0b00753b4146f48eccc9b3c168f44b_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:34,415 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411280d0b00753b4146f48eccc9b3c168f44b_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:34,415 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280d0b00753b4146f48eccc9b3c168f44b_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:34,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-28T09:22:34,428 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-28T09:22:34,430 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:34,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742296_1472 (size=12359) 2024-11-28T09:22:34,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-28T09:22:34,432 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:34,433 INFO [PEWorker-4 {}] 
procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:34,434 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:34,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:34,437 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/376a3666bbfa40e68d40b0c944620255 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/376a3666bbfa40e68d40b0c944620255 2024-11-28T09:22:34,443 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into 376a3666bbfa40e68d40b0c944620255(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:34,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:34,443 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=13, startTime=1732785754357; duration=0sec 2024-11-28T09:22:34,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:34,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:34,443 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:34,446 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:34,446 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:34,446 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
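Note: the compaction activity interleaved with these flushes follows the store-file selection policy logged above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy picking all 3 files per store). The fragment below restates the relevant knobs at their usual defaults, which happen to match the numbers in these lines; it is a sketch for orientation, not configuration read from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionPolicySketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible files before a minor compaction is considered
        // (each store here reaches exactly 3 files and is compacted).
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Upper bound on the number of files merged in one compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store-file count above which updates are blocked until compaction catches up
        // (the "16 blocking" figure in the selection lines).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
                + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
                + " blocking=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
    }
}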
2024-11-28T09:22:34,446 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/637e913db3834921bc81f7db702dd62f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=35.4 K 2024-11-28T09:22:34,447 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 637e913db3834921bc81f7db702dd62f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785751501 2024-11-28T09:22:34,447 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 907f7a5605db4fd4a05d8ddc1610d455, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785752647 2024-11-28T09:22:34,447 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0fdb5d0e62e04065bab05d56402a2414, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:34,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:34,456 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:34,467 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:34,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742297_1473 (size=4469) 2024-11-28T09:22:34,485 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#407 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:34,486 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/b4026b09bf2242819362a6399e7f1d78 is 50, key is test_row_0/C:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:34,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280d167268465d4a2f84d548aa2d3a3c89_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785754456/Put/seqid=0 2024-11-28T09:22:34,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785814505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785814509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,530 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785814509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:34,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742299_1475 (size=12359) 2024-11-28T09:22:34,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742298_1474 (size=14794) 2024-11-28T09:22:34,586 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,587 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,587 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,625 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785814622, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785814632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785814632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:34,740 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,741 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:34,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785814826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785814843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,850 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:34,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785814844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,877 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#406 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:34,878 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6403a38949e34719857556201c22a871 is 175, key is test_row_0/A:col10/1732785752816/Put/seqid=0 2024-11-28T09:22:34,893 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:34,894 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:34,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:34,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:34,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742300_1476 (size=31313) 2024-11-28T09:22:34,912 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6403a38949e34719857556201c22a871 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871 2024-11-28T09:22:34,925 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into 6403a38949e34719857556201c22a871(size=30.6 K), total size for store is 30.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:34,925 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:34,925 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=13, startTime=1732785754357; duration=0sec 2024-11-28T09:22:34,925 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:34,925 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:34,963 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/b4026b09bf2242819362a6399e7f1d78 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/b4026b09bf2242819362a6399e7f1d78 2024-11-28T09:22:34,968 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into b4026b09bf2242819362a6399e7f1d78(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:34,968 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:34,968 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=13, startTime=1732785754357; duration=0sec 2024-11-28T09:22:34,968 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:34,968 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:34,982 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:34,986 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280d167268465d4a2f84d548aa2d3a3c89_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d167268465d4a2f84d548aa2d3a3c89_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:34,987 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c13d4b47a73e46a48f4de8a59a41ef33, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:34,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c13d4b47a73e46a48f4de8a59a41ef33 is 175, key is test_row_0/A:col10/1732785754456/Put/seqid=0 2024-11-28T09:22:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:35,046 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,047 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
as already flushing 2024-11-28T09:22:35,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,047 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742301_1477 (size=39749) 2024-11-28T09:22:35,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785815092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785815118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785815141, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785815152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,161 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785815151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,199 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,199 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,355 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,356 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,356 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,456 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=159, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c13d4b47a73e46a48f4de8a59a41ef33 2024-11-28T09:22:35,472 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/16c7fbf3ef574e68a26b492dc1ffa58e is 50, key is test_row_0/B:col10/1732785754456/Put/seqid=0 2024-11-28T09:22:35,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,510 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:35,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:35,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742302_1478 (size=12151) 2024-11-28T09:22:35,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785815648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,665 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,666 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,666 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
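The RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once the region's memstore passes its blocking limit; that limit is the memstore flush size multiplied by a blocking multiplier, and the 512.0 K figure here is consistent with a deliberately small, test-only flush size. A minimal sketch of the two standard settings involved, using illustrative defaults rather than this test's actual values:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings behind the "Over memstore limit" check in
// HRegion.checkResources(): writes to a region are blocked once its memstore
// grows past (flush size x block multiplier). The defaults shown here are the
// usual production values, not the tuned-down ones this test run appears to use.
public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore limit = " + (flushSize * multiplier) + " bytes");
  }
}
```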
2024-11-28T09:22:35,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,670 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785815664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,673 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:35,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785815666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,818 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,819 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,819 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
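On the client side this pushback is retryable: as the RpcRetryingCallerImpl entry further down shows (tries=6, retries=16), the HBase client backs off and retries the Mutate calls instead of failing them immediately. A hedged sketch of a write issued with an explicit retry budget follows; the property values and the cell contents are illustrative, not what AcidGuaranteesTestTool actually configures.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch: RegionTooBusyException is server-side backpressure, so the
// usual client answer is backoff-and-retry rather than surfacing the error.
// The retry budget is governed by the two properties below.
public class RetryTunedPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 16); // matches the retries=16 seen later in this log
    conf.setLong("hbase.client.pause", 100);        // base pause in ms between retries (assumed value)
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1")); // row name reused from the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put); // blocked-region errors are retried internally up to the budget above
    }
  }
}
```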
2024-11-28T09:22:35,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/16c7fbf3ef574e68a26b492dc1ffa58e 2024-11-28T09:22:35,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/6ccc74b3cb584030aea277e1ee35c95c is 50, key is test_row_0/C:col10/1732785754456/Put/seqid=0 2024-11-28T09:22:35,974 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:35,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:35,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
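Interleaved with the failed procedure attempts, the MemStoreFlusher records trace the normal flush lifecycle: each column family's memstore is written to a temporary HFile under the region's .tmp directory and then committed by renaming it into the family directory (the HRegionFileSystem "Committing ... as ..." and HStore "Added ..." records nearby). As a hedged sketch, the committed store files can be listed straight from the filesystem; the region path below is copied from this log and is specific to this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Lists the HFiles sitting in the region's column family directories (A, B, C)
// after a flush has committed them out of .tmp. Purely illustrative; the HDFS
// URI and region directory are taken from this log and change every run.
public class ListRegionStoreFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path region = new Path("hdfs://localhost:33549/user/jenkins/test-data/"
        + "46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/"
        + "142d3ce9e2b1d2945b57067dd3e37abc");
    FileSystem fs = region.getFileSystem(conf);
    for (String family : new String[] {"A", "B", "C"}) {
      for (FileStatus f : fs.listStatus(new Path(region, family))) {
        System.out.println(family + "/" + f.getPath().getName() + " (" + f.getLen() + " bytes)");
      }
    }
  }
}
```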
2024-11-28T09:22:35,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:35,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:36,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742303_1479 (size=12151) 2024-11-28T09:22:36,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=159 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/6ccc74b3cb584030aea277e1ee35c95c 2024-11-28T09:22:36,020 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c13d4b47a73e46a48f4de8a59a41ef33 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33 2024-11-28T09:22:36,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33, entries=200, sequenceid=159, filesize=38.8 K 2024-11-28T09:22:36,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/16c7fbf3ef574e68a26b492dc1ffa58e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e 2024-11-28T09:22:36,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e, entries=150, sequenceid=159, filesize=11.9 K 2024-11-28T09:22:36,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/6ccc74b3cb584030aea277e1ee35c95c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c 2024-11-28T09:22:36,033 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c, entries=150, sequenceid=159, filesize=11.9 K 2024-11-28T09:22:36,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 142d3ce9e2b1d2945b57067dd3e37abc in 1578ms, sequenceid=159, compaction requested=false 2024-11-28T09:22:36,034 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal 
for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:36,127 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,128 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-28T09:22:36,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:36,128 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:36,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:36,164 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c09beb7b18a346eab98bed0ba918094c_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785754503/Put/seqid=0 2024-11-28T09:22:36,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742304_1480 (size=12304) 2024-11-28T09:22:36,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:36,223 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c09beb7b18a346eab98bed0ba918094c_142d3ce9e2b1d2945b57067dd3e37abc to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c09beb7b18a346eab98bed0ba918094c_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:36,225 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c3f554a1f28840378dcbbd0cdf032326, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:36,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c3f554a1f28840378dcbbd0cdf032326 is 175, key is test_row_0/A:col10/1732785754503/Put/seqid=0 2024-11-28T09:22:36,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742305_1481 (size=31105) 2024-11-28T09:22:36,257 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=172, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c3f554a1f28840378dcbbd0cdf032326 2024-11-28T09:22:36,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/a39cff7cf5c34c849ab16d39679b3603 is 50, key is test_row_0/B:col10/1732785754503/Put/seqid=0 2024-11-28T09:22:36,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742306_1482 (size=12151) 2024-11-28T09:22:36,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:36,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:36,666 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
as already flushing 2024-11-28T09:22:36,719 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/a39cff7cf5c34c849ab16d39679b3603 2024-11-28T09:22:36,735 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/142eb7924d4f400da01063568ccc8d1a is 50, key is test_row_0/C:col10/1732785754503/Put/seqid=0 2024-11-28T09:22:36,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742307_1483 (size=12151) 2024-11-28T09:22:36,774 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785816761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,790 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785816774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,793 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785816778, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785816876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,906 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785816891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:36,907 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:36,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785816895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785817093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,121 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785817110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,123 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785817111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,124 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785817111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,125 DEBUG [Thread-1926 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4229 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:37,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785817142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,151 DEBUG [Thread-1920 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4258 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:22:37,172 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=172 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/142eb7924d4f400da01063568ccc8d1a 2024-11-28T09:22:37,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/c3f554a1f28840378dcbbd0cdf032326 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326 2024-11-28T09:22:37,198 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326, entries=150, sequenceid=172, filesize=30.4 K 2024-11-28T09:22:37,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/a39cff7cf5c34c849ab16d39679b3603 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603 2024-11-28T09:22:37,204 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603, entries=150, sequenceid=172, filesize=11.9 K 2024-11-28T09:22:37,205 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/142eb7924d4f400da01063568ccc8d1a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a 2024-11-28T09:22:37,209 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a, entries=150, sequenceid=172, filesize=11.9 K 2024-11-28T09:22:37,209 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 142d3ce9e2b1d2945b57067dd3e37abc in 1081ms, sequenceid=172, compaction requested=true 2024-11-28T09:22:37,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 
142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:37,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:37,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-28T09:22:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-28T09:22:37,212 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-28T09:22:37,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.7780 sec 2024-11-28T09:22:37,214 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 2.7830 sec 2024-11-28T09:22:37,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:37,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:37,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:37,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128101808ec220f432ba75412a8fdaf5e34_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:37,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742308_1484 (size=14794) 2024-11-28T09:22:37,457 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:37,461 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128101808ec220f432ba75412a8fdaf5e34_142d3ce9e2b1d2945b57067dd3e37abc to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128101808ec220f432ba75412a8fdaf5e34_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:37,461 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/48097a7d0c4a4569986dd944c514cf2a, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:37,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/48097a7d0c4a4569986dd944c514cf2a is 175, key is test_row_0/A:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:37,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742309_1485 (size=39749) 2024-11-28T09:22:37,494 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/48097a7d0c4a4569986dd944c514cf2a 2024-11-28T09:22:37,513 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/184cf1c872ef40219e68d88846704405 is 50, key is test_row_0/B:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:37,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785817526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785817526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,544 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785817533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742310_1486 (size=12151) 2024-11-28T09:22:37,640 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785817634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,647 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785817635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,652 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785817645, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,847 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785817841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,854 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785817848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:37,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785817854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:37,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/184cf1c872ef40219e68d88846704405 2024-11-28T09:22:37,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/9c83ee1ba338496cbcd53d283c7c4081 is 50, key is test_row_0/C:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:38,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742311_1487 (size=12151) 2024-11-28T09:22:38,009 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/9c83ee1ba338496cbcd53d283c7c4081 2024-11-28T09:22:38,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/48097a7d0c4a4569986dd944c514cf2a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a 2024-11-28T09:22:38,020 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a, entries=200, sequenceid=199, filesize=38.8 K 2024-11-28T09:22:38,021 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/184cf1c872ef40219e68d88846704405 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405 2024-11-28T09:22:38,025 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405, entries=150, sequenceid=199, filesize=11.9 K 2024-11-28T09:22:38,026 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/9c83ee1ba338496cbcd53d283c7c4081 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081 2024-11-28T09:22:38,029 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081, entries=150, sequenceid=199, filesize=11.9 K 2024-11-28T09:22:38,030 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 142d3ce9e2b1d2945b57067dd3e37abc in 618ms, sequenceid=199, compaction requested=true 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:38,030 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:38,030 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:38,030 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141916 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:38,031 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,031 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,031 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/376a3666bbfa40e68d40b0c944620255, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=47.7 K 2024-11-28T09:22:38,031 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=138.6 K 2024-11-28T09:22:38,031 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a] 2024-11-28T09:22:38,031 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 376a3666bbfa40e68d40b0c944620255, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 16c7fbf3ef574e68a26b492dc1ffa58e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732785753815 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6403a38949e34719857556201c22a871, keycount=150, bloomtype=ROW, size=30.6 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a39cff7cf5c34c849ab16d39679b3603, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732785754503 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c13d4b47a73e46a48f4de8a59a41ef33, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732785753815 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 184cf1c872ef40219e68d88846704405, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:38,032 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3f554a1f28840378dcbbd0cdf032326, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=172, earliestPutTs=1732785754503 2024-11-28T09:22:38,033 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48097a7d0c4a4569986dd944c514cf2a, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:38,042 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#417 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:38,043 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/af2b9a1e49ab4baeaca6b9ff982c9707 is 50, key is test_row_0/B:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:38,069 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:38,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742312_1488 (size=12595) 2024-11-28T09:22:38,079 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411288d0a4bb0277345a6a90928114e2fe584_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:38,081 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411288d0a4bb0277345a6a90928114e2fe584_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:38,081 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288d0a4bb0277345a6a90928114e2fe584_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:38,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742313_1489 (size=4469) 2024-11-28T09:22:38,092 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#418 average throughput is 1.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:38,093 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/1e24d9b7f17b4113b44e51249c984fe2 is 175, key is test_row_0/A:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:38,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742314_1490 (size=31549) 2024-11-28T09:22:38,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:38,160 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:38,160 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:38,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128786464ad1ffc43499561bb9a9126f6be_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785758159/Put/seqid=0 2024-11-28T09:22:38,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742315_1491 (size=14794) 2024-11-28T09:22:38,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785818243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785818243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785818249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,354 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785818350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785818351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,361 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785818357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,477 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/af2b9a1e49ab4baeaca6b9ff982c9707 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/af2b9a1e49ab4baeaca6b9ff982c9707 2024-11-28T09:22:38,481 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into af2b9a1e49ab4baeaca6b9ff982c9707(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:38,481 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:38,481 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=12, startTime=1732785758030; duration=0sec 2024-11-28T09:22:38,481 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:38,481 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:38,481 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:38,482 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48812 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:38,482 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:38,482 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,483 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/b4026b09bf2242819362a6399e7f1d78, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=47.7 K 2024-11-28T09:22:38,483 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b4026b09bf2242819362a6399e7f1d78, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1732785752816 2024-11-28T09:22:38,483 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ccc74b3cb584030aea277e1ee35c95c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=159, earliestPutTs=1732785753815 2024-11-28T09:22:38,483 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 142eb7924d4f400da01063568ccc8d1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=172, earliestPutTs=1732785754503 2024-11-28T09:22:38,484 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c83ee1ba338496cbcd53d283c7c4081, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:38,490 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:38,491 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/168985dab35343cda97445b62c392323 is 50, key is test_row_0/C:col10/1732785757411/Put/seqid=0 2024-11-28T09:22:38,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742316_1492 (size=12595) 2024-11-28T09:22:38,508 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/1e24d9b7f17b4113b44e51249c984fe2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2 2024-11-28T09:22:38,512 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into 1e24d9b7f17b4113b44e51249c984fe2(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:38,512 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:38,512 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=12, startTime=1732785758030; duration=0sec 2024-11-28T09:22:38,512 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:38,512 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:38,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-28T09:22:38,540 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-28T09:22:38,541 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:38,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-28T09:22:38,543 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:38,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:38,543 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:38,543 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:38,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785818557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,563 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785818557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,564 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785818562, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,578 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:38,581 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128786464ad1ffc43499561bb9a9126f6be_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128786464ad1ffc43499561bb9a9126f6be_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:38,582 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0ae28d0e11e24c82aaffaff7240dc005, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:38,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0ae28d0e11e24c82aaffaff7240dc005 is 175, key is test_row_0/A:col10/1732785758159/Put/seqid=0 2024-11-28T09:22:38,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742317_1493 (size=39749) 2024-11-28T09:22:38,592 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=210, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0ae28d0e11e24c82aaffaff7240dc005 2024-11-28T09:22:38,599 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/e95dc8cf38b347799acd73090c09520a is 50, key is test_row_0/B:col10/1732785758159/Put/seqid=0 2024-11-28T09:22:38,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742318_1494 (size=12151) 2024-11-28T09:22:38,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:38,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,695 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T09:22:38,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:38,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,696 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:38,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:38,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:38,848 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T09:22:38,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:38,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:38,849 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:38,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:38,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:38,867 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785818864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,868 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785818865, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,870 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:38,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785818867, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:38,900 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/168985dab35343cda97445b62c392323 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/168985dab35343cda97445b62c392323 2024-11-28T09:22:38,904 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into 168985dab35343cda97445b62c392323(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:38,904 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:38,904 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=12, startTime=1732785758030; duration=0sec 2024-11-28T09:22:38,904 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:38,904 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:39,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,001 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T09:22:39,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:39,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:39,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:39,001 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:39,001 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:39,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:39,008 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/e95dc8cf38b347799acd73090c09520a 2024-11-28T09:22:39,022 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/e9e81dad42b148a3947a26f4c0ac47a4 is 50, key is test_row_0/C:col10/1732785758159/Put/seqid=0 2024-11-28T09:22:39,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742319_1495 (size=12151) 2024-11-28T09:22:39,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=210 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/e9e81dad42b148a3947a26f4c0ac47a4 2024-11-28T09:22:39,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/0ae28d0e11e24c82aaffaff7240dc005 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005 2024-11-28T09:22:39,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005, entries=200, sequenceid=210, filesize=38.8 K 2024-11-28T09:22:39,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/e95dc8cf38b347799acd73090c09520a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a 2024-11-28T09:22:39,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a, entries=150, sequenceid=210, filesize=11.9 K 2024-11-28T09:22:39,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/e9e81dad42b148a3947a26f4c0ac47a4 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4 2024-11-28T09:22:39,060 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4, entries=150, sequenceid=210, filesize=11.9 K 2024-11-28T09:22:39,061 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 142d3ce9e2b1d2945b57067dd3e37abc in 901ms, sequenceid=210, compaction requested=false 2024-11-28T09:22:39,061 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:39,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:39,153 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,153 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:39,154 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:39,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:39,162 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286a0f2f5cf9eb4c0391f42140877ff21b_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785758241/Put/seqid=0 2024-11-28T09:22:39,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742320_1496 (size=12304) 2024-11-28T09:22:39,374 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:39,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:39,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785819383, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785819384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785819386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785819488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785819488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785819492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:39,589 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286a0f2f5cf9eb4c0391f42140877ff21b_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286a0f2f5cf9eb4c0391f42140877ff21b_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:39,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8597c5b5246d46a0ba2acec4b0617e9f, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:39,590 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8597c5b5246d46a0ba2acec4b0617e9f is 175, key is test_row_0/A:col10/1732785758241/Put/seqid=0 2024-11-28T09:22:39,594 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742321_1497 (size=31105) 2024-11-28T09:22:39,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:39,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785819696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785819696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:39,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785819697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:39,995 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=238, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8597c5b5246d46a0ba2acec4b0617e9f 2024-11-28T09:22:40,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/0acfbd881d5140bba34928d54b88b2be is 50, key is test_row_0/B:col10/1732785758241/Put/seqid=0 2024-11-28T09:22:40,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785820001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785820001, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,005 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785820002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742322_1498 (size=12151) 2024-11-28T09:22:40,407 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/0acfbd881d5140bba34928d54b88b2be 2024-11-28T09:22:40,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3d66049a42754c4093041ba6623befaf is 50, key is test_row_0/C:col10/1732785758241/Put/seqid=0 2024-11-28T09:22:40,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742323_1499 (size=12151) 2024-11-28T09:22:40,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785820506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,514 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785820509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,515 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:40,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785820510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:40,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:40,818 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3d66049a42754c4093041ba6623befaf 2024-11-28T09:22:40,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8597c5b5246d46a0ba2acec4b0617e9f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f 2024-11-28T09:22:40,834 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f, entries=150, sequenceid=238, filesize=30.4 K 2024-11-28T09:22:40,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/0acfbd881d5140bba34928d54b88b2be as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be 2024-11-28T09:22:40,839 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be, entries=150, sequenceid=238, filesize=11.9 K 2024-11-28T09:22:40,840 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3d66049a42754c4093041ba6623befaf as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf 2024-11-28T09:22:40,846 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf, entries=150, sequenceid=238, filesize=11.9 K 2024-11-28T09:22:40,847 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 142d3ce9e2b1d2945b57067dd3e37abc in 1693ms, sequenceid=238, compaction requested=true 2024-11-28T09:22:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:40,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:40,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-28T09:22:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-28T09:22:40,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-28T09:22:40,855 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3100 sec 2024-11-28T09:22:40,856 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 2.3140 sec 2024-11-28T09:22:41,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:41,168 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:41,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:41,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289900884d052743f2aef8ab3ad8e10166_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:41,180 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742324_1500 (size=14794) 2024-11-28T09:22:41,181 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:41,185 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289900884d052743f2aef8ab3ad8e10166_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289900884d052743f2aef8ab3ad8e10166_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:41,186 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4930defe2d094768b1c5799820dab4a7, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:41,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4930defe2d094768b1c5799820dab4a7 is 175, key is test_row_0/A:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:41,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742325_1501 (size=39749) 2024-11-28T09:22:41,260 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785821252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,261 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785821253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,365 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785821362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,366 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785821362, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,518 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785821513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785821520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,525 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785821521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,571 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785821567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,572 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785821567, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,591 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=250, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4930defe2d094768b1c5799820dab4a7 2024-11-28T09:22:41,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/29596a67de954defbbfa38c3d79ee62c is 50, key is test_row_0/B:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:41,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742326_1502 (size=12151) 2024-11-28T09:22:41,875 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785821873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:41,880 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:41,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785821875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:42,013 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/29596a67de954defbbfa38c3d79ee62c 2024-11-28T09:22:42,019 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d8df95b7347a43628914939542a45fae is 50, key is test_row_0/C:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:42,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742327_1503 (size=12151) 2024-11-28T09:22:42,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:42,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785822376, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:42,392 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:42,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785822385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:42,426 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=250 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d8df95b7347a43628914939542a45fae 2024-11-28T09:22:42,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4930defe2d094768b1c5799820dab4a7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7 2024-11-28T09:22:42,438 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7, entries=200, sequenceid=250, filesize=38.8 K 2024-11-28T09:22:42,438 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/29596a67de954defbbfa38c3d79ee62c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c 2024-11-28T09:22:42,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c, entries=150, sequenceid=250, filesize=11.9 K 2024-11-28T09:22:42,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/d8df95b7347a43628914939542a45fae as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae 2024-11-28T09:22:42,461 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae, entries=150, sequenceid=250, filesize=11.9 K 2024-11-28T09:22:42,462 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 142d3ce9e2b1d2945b57067dd3e37abc in 1294ms, sequenceid=250, compaction requested=true 2024-11-28T09:22:42,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:42,463 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:42,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:42,463 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:42,463 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:42,465 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 142152 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:42,465 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:42,465 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
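The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once a region's memstore exceeds its blocking size. That blocking size is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so the 512.0 K limit reported here implies this test run lowers the flush size far below the 128 MB default. The exact values are not visible in the log; the sketch below is only one hypothetical combination that reproduces the observed threshold.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 K flush size with the default multiplier of 4 gives the
    // 512 K blocking limit seen in the "Over memstore limit" messages above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("blocking memstore size = " + blocking + " bytes"); // 524288
  }
}

This is consistent with the rejections tapering off once the MemStoreFlusher entries above report the flush as finished and the memstore drops back under the limit.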
2024-11-28T09:22:42,465 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=138.8 K 2024-11-28T09:22:42,465 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:42,465 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7] 2024-11-28T09:22:42,466 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e24d9b7f17b4113b44e51249c984fe2, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:42,466 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:42,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:42,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:42,466 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 
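For context on the ClientService Mutate calls that were being rejected earlier in this section (callId 65, 67, 77, and so on), they correspond to ordinary client puts against the A/B/C families of TestAcidGuarantees. The following is a minimal, hypothetical writer in that spirit: the table, row, family, and qualifier names are taken from the log, while the value and the error handling are illustrative and not the actual test code.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AcidGuaranteesWriterSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Family A and qualifier col10 appear in the flush entries above; the value is made up.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        table.put(put);
      } catch (IOException e) {
        // While the region is over its 512 K blocking limit, the put surfaces a
        // RegionTooBusyException once client-side retries are exhausted; a real
        // workload would back off and retry here.
      }
    }
  }
}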
2024-11-28T09:22:42,466 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:42,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:42,466 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/af2b9a1e49ab4baeaca6b9ff982c9707, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=47.9 K 2024-11-28T09:22:42,466 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting af2b9a1e49ab4baeaca6b9ff982c9707, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ae28d0e11e24c82aaffaff7240dc005, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785757430 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8597c5b5246d46a0ba2acec4b0617e9f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785758241 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e95dc8cf38b347799acd73090c09520a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785757430 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0acfbd881d5140bba34928d54b88b2be, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785758241 2024-11-28T09:22:42,467 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4930defe2d094768b1c5799820dab4a7, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732785759378 2024-11-28T09:22:42,468 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 29596a67de954defbbfa38c3d79ee62c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, 
earliestPutTs=1732785759378 2024-11-28T09:22:42,496 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#429 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:42,497 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/d791731dbde740daaecc087694cfa11d is 50, key is test_row_0/B:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:42,500 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:42,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742328_1504 (size=12731) 2024-11-28T09:22:42,526 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/d791731dbde740daaecc087694cfa11d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/d791731dbde740daaecc087694cfa11d 2024-11-28T09:22:42,531 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into d791731dbde740daaecc087694cfa11d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:42,531 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:42,531 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=12, startTime=1732785762463; duration=0sec 2024-11-28T09:22:42,531 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:42,531 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:42,531 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:42,532 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112807a87dac215f4dc1ac19757933c8068b_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:42,537 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112807a87dac215f4dc1ac19757933c8068b_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:42,537 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807a87dac215f4dc1ac19757933c8068b_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:42,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:42,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:42,539 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:42,539 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/168985dab35343cda97445b62c392323, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=47.9 K 2024-11-28T09:22:42,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 168985dab35343cda97445b62c392323, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1732785756746 2024-11-28T09:22:42,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e9e81dad42b148a3947a26f4c0ac47a4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=210, earliestPutTs=1732785757430 2024-11-28T09:22:42,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d66049a42754c4093041ba6623befaf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785758241 2024-11-28T09:22:42,542 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d8df95b7347a43628914939542a45fae, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732785759378 2024-11-28T09:22:42,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742329_1505 (size=4469) 2024-11-28T09:22:42,557 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#430 average throughput is 0.43 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:42,558 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/a835524f570149218c69a1c2aa42c098 is 175, key is test_row_0/A:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:42,568 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:42,568 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/a19f92693b1f4124bcae1d3c4f5b42fe is 50, key is test_row_0/C:col10/1732785759378/Put/seqid=0 2024-11-28T09:22:42,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742330_1506 (size=31685) 2024-11-28T09:22:42,608 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/a835524f570149218c69a1c2aa42c098 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098 2024-11-28T09:22:42,616 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into a835524f570149218c69a1c2aa42c098(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:42,616 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:42,616 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=12, startTime=1732785762462; duration=0sec 2024-11-28T09:22:42,616 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:42,616 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:42,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742331_1507 (size=12731) 2024-11-28T09:22:42,626 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/a19f92693b1f4124bcae1d3c4f5b42fe as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/a19f92693b1f4124bcae1d3c4f5b42fe 2024-11-28T09:22:42,632 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into a19f92693b1f4124bcae1d3c4f5b42fe(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:42,632 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:42,632 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=12, startTime=1732785762466; duration=0sec 2024-11-28T09:22:42,632 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:42,632 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:42,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-28T09:22:42,647 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-28T09:22:42,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:42,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-28T09:22:42,650 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:42,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T09:22:42,651 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:42,651 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T09:22:42,804 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:42,804 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
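The "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" request above is an administrative table flush: the master stores it as FlushTableProcedure pid=114 and fans it out to a FlushRegionProcedure (pid=115) that runs FlushRegionCallable on the region server, as the following entries show. A minimal sketch of the client side of such a request, assuming an already reachable cluster, is:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush every region of the table; in this log the request
      // is carried out through FlushTableProcedure and FlushRegionProcedure.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}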
2024-11-28T09:22:42,805 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:42,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:42,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282372dc64ab9641bbb258a69a1bcc08cf_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785761252/Put/seqid=0 2024-11-28T09:22:42,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742332_1508 (size=12454) 2024-11-28T09:22:42,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:42,835 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282372dc64ab9641bbb258a69a1bcc08cf_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282372dc64ab9641bbb258a69a1bcc08cf_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:42,836 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/80db28c3706f49c3b714ced395221966, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:42,837 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/80db28c3706f49c3b714ced395221966 is 175, key is test_row_0/A:col10/1732785761252/Put/seqid=0 2024-11-28T09:22:42,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742333_1509 (size=31255) 2024-11-28T09:22:42,842 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=277, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/80db28c3706f49c3b714ced395221966 2024-11-28T09:22:42,850 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/150f84c4cc384e10b4652a8bca633a11 is 50, key is test_row_0/B:col10/1732785761252/Put/seqid=0 2024-11-28T09:22:42,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742334_1510 (size=12301) 2024-11-28T09:22:42,855 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/150f84c4cc384e10b4652a8bca633a11 2024-11-28T09:22:42,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/8061949c9f3e40cea2d04dff8e73580f is 50, key is test_row_0/C:col10/1732785761252/Put/seqid=0 2024-11-28T09:22:42,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742335_1511 (size=12301) 2024-11-28T09:22:42,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T09:22:43,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-28T09:22:43,268 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=277 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/8061949c9f3e40cea2d04dff8e73580f 2024-11-28T09:22:43,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/80db28c3706f49c3b714ced395221966 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966 2024-11-28T09:22:43,277 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966, entries=150, sequenceid=277, filesize=30.5 K 2024-11-28T09:22:43,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/150f84c4cc384e10b4652a8bca633a11 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11 2024-11-28T09:22:43,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,281 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T09:22:43,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/8061949c9f3e40cea2d04dff8e73580f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f 2024-11-28T09:22:43,285 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f, entries=150, sequenceid=277, filesize=12.0 K 2024-11-28T09:22:43,285 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] 
regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=0 B/0 for 142d3ce9e2b1d2945b57067dd3e37abc in 480ms, sequenceid=277, compaction requested=false 2024-11-28T09:22:43,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:43,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:43,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-28T09:22:43,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-28T09:22:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,293 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114
2024-11-28T09:22:43,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 635 msec
2024-11-28T09:22:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 645 msec
2024-11-28T09:22:43,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
2024-11-28T09:22:43,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker (the same DEBUG message repeats several hundred times between 2024-11-28T09:22:43,333 and 2024-11-28T09:22:43,378, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1, and 2 on port 33819)
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... same DEBUG entry repeated verbatim by RpcServer.default.FPBQ.Fifo handlers 0-2 (port 33819) from 2024-11-28T09:22:43,504 through 2024-11-28T09:22:43,580; duplicate entries omitted ...]
2024-11-28T09:22:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
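The DEBUG records above and below come from StoreFileTrackerFactory resolving a tracker for each store it touches; with nothing configured it falls back to DefaultStoreFileTracker. A minimal illustrative sketch of how that choice is normally pinned down (assuming an HBase 2.5+ client on the classpath; the key is the one StoreFileTrackerFactory reads, the value and class name are not taken from this test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class StoreFileTrackerConfigSketch {
  public static void main(String[] args) {
    // HBaseConfiguration.create() picks up hbase-default.xml/hbase-site.xml if present.
    Configuration conf = HBaseConfiguration.create();
    // Key read by StoreFileTrackerFactory; "DEFAULT" (or leaving it unset) selects the
    // DefaultStoreFileTracker instances being instantiated in the records above.
    conf.set("hbase.store.file-tracker.impl", "DEFAULT");
    System.out.println("store file tracker impl = " + conf.get("hbase.store.file-tracker.impl"));
  }
}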
2024-11-28T09:22:43,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc
2024-11-28T09:22:43,604 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-28T09:22:43,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A
2024-11-28T09:22:43,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:43,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B
2024-11-28T09:22:43,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:43,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C
2024-11-28T09:22:43,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-28T09:22:43,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128195f586fcc7f4d968ee88d8cad040278_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785763548/Put/seqid=0
2024-11-28T09:22:43,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742336_1512 (size=12454)
2024-11-28T09:22:43,651 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:43,655 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128195f586fcc7f4d968ee88d8cad040278_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128195f586fcc7f4d968ee88d8cad040278_142d3ce9e2b1d2945b57067dd3e37abc
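The MemStoreFlusher records above show region 142d3ce9e2b1d2945b57067dd3e37abc flushing its three column families, ending with the MOB file being renamed into place. The FLUSH operations that later appear as procedures 114 and 116 are normally issued from a client through the Admin API; a minimal sketch under that assumption (connection setup and error handling trimmed, class name illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; on the region server this
      // appears as the "Flushing ... 3/3 column families" and store-flush records above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}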
2024-11-28T09:22:43,655 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:22:43,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785823646, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,656 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4ee7320b595a4a5197300ceccbd16f47, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc]
2024-11-28T09:22:43,657 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4ee7320b595a4a5197300ceccbd16f47 is 175, key is test_row_0/A:col10/1732785763548/Put/seqid=0
2024-11-28T09:22:43,659 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785823651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785823653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,660 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785823654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742337_1513 (size=31255)
2024-11-28T09:22:43,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785823655, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
2024-11-28T09:22:43,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-28T09:22:43,759 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed
2024-11-28T09:22:43,758 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637
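The handlers above and below keep rejecting Mutate calls with RegionTooBusyException while the flush drains; the reported 512.0 K is the region's blocking memstore size, which is derived from the per-region flush size and the block multiplier. A hedged sketch of the two standard keys involved (values illustrative, not this test's configuration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Per-region flush threshold, and the multiplier at which further writes are
    // rejected with RegionTooBusyException by HRegion.checkResources().
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
    System.out.println("writes block above roughly " + blockingLimit + " bytes per region");
  }
}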
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785823756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:43,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-28T09:22:43,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:43,762 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:43,763 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:43,763 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:43,767 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785823761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785823761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785823765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785823768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:43,915 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,915 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T09:22:43,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:43,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:43,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:43,915 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
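The rejections above show client Mutate calls failing with RegionTooBusyException ("Over memstore limit=512.0 K") while the master starts FlushTableProcedure pid=116 for TestAcidGuarantees. Below is a minimal, illustrative Java sketch (not taken from this test run) of how a writer might react to such rejections: request a flush through the standard HBase Admin API and back off before retrying the Put. The table, row, column family, and qualifier names are copied from the log; the retry policy, backoff values, and cell value are assumptions.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);   // the server-side RegionTooBusyException seen in the log
          break;            // surfaces here (often wrapped) once client retries run out
        } catch (IOException rejected) {
          admin.flush(tn);          // ask for a flush, like the "flush TestAcidGuarantees" call above
          Thread.sleep(backoffMs);  // back off so the memstore can drain below its blocking limit
          backoffMs *= 2;
        }
      }
    }
  }
}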
2024-11-28T09:22:43,916 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:43,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:43,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785823960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785823968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,972 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785823970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,973 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785823970, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:43,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:43,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785823974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:44,064 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=288, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4ee7320b595a4a5197300ceccbd16f47 2024-11-28T09:22:44,068 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,068 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T09:22:44,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:44,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,068 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:44,068 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:44,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:44,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 is 50, key is test_row_0/B:col10/1732785763548/Put/seqid=0 2024-11-28T09:22:44,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742338_1514 (size=12301) 2024-11-28T09:22:44,079 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 2024-11-28T09:22:44,109 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cf2132c536f943d4819b7b670930d9c3 is 50, key is test_row_0/C:col10/1732785763548/Put/seqid=0 2024-11-28T09:22:44,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742339_1515 (size=12301) 2024-11-28T09:22:44,154 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cf2132c536f943d4819b7b670930d9c3 2024-11-28T09:22:44,159 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/4ee7320b595a4a5197300ceccbd16f47 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47 2024-11-28T09:22:44,165 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47, entries=150, sequenceid=288, filesize=30.5 K 2024-11-28T09:22:44,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 2024-11-28T09:22:44,169 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72, entries=150, sequenceid=288, filesize=12.0 K 2024-11-28T09:22:44,170 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/cf2132c536f943d4819b7b670930d9c3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3 2024-11-28T09:22:44,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3, entries=150, sequenceid=288, filesize=12.0 K 2024-11-28T09:22:44,183 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 142d3ce9e2b1d2945b57067dd3e37abc in 579ms, sequenceid=288, compaction requested=true 2024-11-28T09:22:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:44,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:44,183 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:44,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:44,184 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:44,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:44,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:44,185 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:44,185 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:44,185 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,185 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/d791731dbde740daaecc087694cfa11d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=36.5 K 2024-11-28T09:22:44,185 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:44,185 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:44,185 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,185 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=92.0 K 2024-11-28T09:22:44,185 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
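For reference, the two "Exploring compaction algorithm" selections above are consistent with the per-file sizes in the "Compacting ..." entries that follow: for family B, 12.4 K + 12.0 K + 12.0 K ≈ 36.5 K, i.e. the reported 37333 bytes (37333 / 1024 ≈ 36.5 K); for family A, 30.9 K + 30.5 K + 30.5 K ≈ 92.0 K, matching the reported 94195 bytes (94195 / 1024 ≈ 92.0 K). Each selection therefore takes all three eligible store files, which is why both are logged as a minor compaction (all files).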
2024-11-28T09:22:44,186 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47] 2024-11-28T09:22:44,186 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d791731dbde740daaecc087694cfa11d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732785759378 2024-11-28T09:22:44,186 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a835524f570149218c69a1c2aa42c098, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732785759378 2024-11-28T09:22:44,187 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80db28c3706f49c3b714ced395221966, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785761238 2024-11-28T09:22:44,187 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 150f84c4cc384e10b4652a8bca633a11, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785761238 2024-11-28T09:22:44,187 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9dc50e1cfd7b4ba29e9e8febf98d7e72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:44,187 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4ee7320b595a4a5197300ceccbd16f47, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:44,203 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:44,214 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#439 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:44,214 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/51643fb9b1cb4323af09f51ed2570735 is 50, key is test_row_0/B:col10/1732785763548/Put/seqid=0 2024-11-28T09:22:44,221 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,221 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-28T09:22:44,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,222 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:44,222 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128ca26e197c116453aac6ade63ab56ba36_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:44,222 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:44,224 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128ca26e197c116453aac6ade63ab56ba36_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:44,224 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ca26e197c116453aac6ade63ab56ba36_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:44,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742340_1516 (size=12983) 2024-11-28T09:22:44,272 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/51643fb9b1cb4323af09f51ed2570735 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/51643fb9b1cb4323af09f51ed2570735 2024-11-28T09:22:44,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:44,272 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:44,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128356e512c37cd4fa3852546f028a69a7b_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785763650/Put/seqid=0 2024-11-28T09:22:44,279 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into 51643fb9b1cb4323af09f51ed2570735(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
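Family A of this table is MOB-enabled, which is why its flushes and compactions go through DefaultMobStoreFlusher / DefaultMobStoreCompactor and why a MOB writer is created and then aborted when no cell exceeds the MOB threshold. The sketch below is an illustrative assumption (not taken from this test) of how such a table could be declared with the standard admin API; the 100 KB threshold is an example value, not one read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A keeps values above the MOB threshold in separate MOB files,
      // so its flush/compaction paths are the MOB variants seen in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024)   // illustrative threshold in bytes, not from the log
          .build());
      // Families B and C are ordinary (non-MOB) column families.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}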
2024-11-28T09:22:44,279 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:44,279 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=13, startTime=1732785764183; duration=0sec 2024-11-28T09:22:44,279 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:44,279 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:44,279 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:44,280 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:44,280 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:44,280 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:44,280 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/a19f92693b1f4124bcae1d3c4f5b42fe, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=36.5 K 2024-11-28T09:22:44,281 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a19f92693b1f4124bcae1d3c4f5b42fe, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=250, earliestPutTs=1732785759378 2024-11-28T09:22:44,281 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8061949c9f3e40cea2d04dff8e73580f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=277, earliestPutTs=1732785761238 2024-11-28T09:22:44,282 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cf2132c536f943d4819b7b670930d9c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:44,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 
is added to blk_1073742341_1517 (size=4469) 2024-11-28T09:22:44,288 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#438 average throughput is 0.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:44,289 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/20e8aee5f7184f5083d86b7c387ea6eb is 175, key is test_row_0/A:col10/1732785763548/Put/seqid=0 2024-11-28T09:22:44,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785824286, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,301 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785824287, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785824298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785824299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785824301, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,317 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#441 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:44,318 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3caef55a5afa4ab8971c89fcb3b99440 is 50, key is test_row_0/C:col10/1732785763548/Put/seqid=0 2024-11-28T09:22:44,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742342_1518 (size=12454) 2024-11-28T09:22:44,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:44,333 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128356e512c37cd4fa3852546f028a69a7b_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128356e512c37cd4fa3852546f028a69a7b_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:44,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6a8779a2ee3b4cbfb0cc3eaef0986573, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:44,336 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6a8779a2ee3b4cbfb0cc3eaef0986573 is 175, key is test_row_0/A:col10/1732785763650/Put/seqid=0 2024-11-28T09:22:44,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742343_1519 (size=31937) 2024-11-28T09:22:44,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742345_1521 (size=31255) 2024-11-28T09:22:44,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742344_1520 (size=12983) 2024-11-28T09:22:44,353 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3caef55a5afa4ab8971c89fcb3b99440 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3caef55a5afa4ab8971c89fcb3b99440 2024-11-28T09:22:44,356 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): 
Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into 3caef55a5afa4ab8971c89fcb3b99440(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:44,356 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:44,356 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=13, startTime=1732785764184; duration=0sec 2024-11-28T09:22:44,356 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:44,356 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:44,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:44,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785824402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,406 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785824403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785824410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,413 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785824410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,414 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785824411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,612 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785824608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,613 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785824608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785824614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785824615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,621 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785824616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,748 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=313, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6a8779a2ee3b4cbfb0cc3eaef0986573 2024-11-28T09:22:44,751 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/20e8aee5f7184f5083d86b7c387ea6eb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb 2024-11-28T09:22:44,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/3fe174329ad44809afed859a08c2ace2 is 50, key is test_row_0/B:col10/1732785763650/Put/seqid=0 2024-11-28T09:22:44,766 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into 20e8aee5f7184f5083d86b7c387ea6eb(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
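[Aside, not part of the captured log output.] The recurring RegionTooBusyException entries above are Mutate RPCs rejected while the memstore of region 142d3ce9e2b1d2945b57067dd3e37abc sits over its 512.0 K blocking limit, until the in-flight flush drains it. Below is a minimal, hypothetical sketch of how a writer could absorb that pushback with the stock HBase Java client; the value written and the retry/backoff numbers are invented, and the stock client already retries such failures internally, so this only spells the pattern out.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same row/column shape as the test traffic in the log: family A, qualifier col10.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some-value"));
            long backoffMs = 100;                      // hypothetical starting backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);                    // may be rejected while the memstore is over its limit
                    break;                             // write accepted
                } catch (RegionTooBusyException e) {
                    // e carries the "Over memstore limit" message logged above;
                    // wait for the flush to make room, then retry.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;                    // simple exponential backoff
                }
            }
        }
    }
}

In practice the rejection may also surface wrapped in the client's own retry-exhausted exception rather than as a bare RegionTooBusyException; the backoff idea is the same either way.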
2024-11-28T09:22:44,766 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:44,766 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=13, startTime=1732785764183; duration=0sec 2024-11-28T09:22:44,766 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:44,766 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:44,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742346_1522 (size=12301) 2024-11-28T09:22:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:44,917 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,917 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785824914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785824913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785824918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,924 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785824919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:44,926 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:44,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785824923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,169 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/3fe174329ad44809afed859a08c2ace2 2024-11-28T09:22:45,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3b4e7aa95b1b44a49098adfaf219891b is 50, key is test_row_0/C:col10/1732785763650/Put/seqid=0 2024-11-28T09:22:45,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742347_1523 (size=12301) 2024-11-28T09:22:45,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:45,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785825420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:45,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785825428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,440 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:45,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785825432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:45,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785825433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:45,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785825433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:45,657 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=313 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3b4e7aa95b1b44a49098adfaf219891b 2024-11-28T09:22:45,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/6a8779a2ee3b4cbfb0cc3eaef0986573 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573 2024-11-28T09:22:45,667 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573, entries=150, sequenceid=313, filesize=30.5 K 2024-11-28T09:22:45,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/3fe174329ad44809afed859a08c2ace2 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2 2024-11-28T09:22:45,674 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2, entries=150, sequenceid=313, filesize=12.0 K 2024-11-28T09:22:45,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/3b4e7aa95b1b44a49098adfaf219891b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b 2024-11-28T09:22:45,678 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b, entries=150, sequenceid=313, filesize=12.0 K 2024-11-28T09:22:45,679 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 142d3ce9e2b1d2945b57067dd3e37abc in 1457ms, sequenceid=313, compaction requested=false 2024-11-28T09:22:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
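[Aside, not part of the captured log output.] The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which HBase derives from the configured flush size multiplied by a blocking multiplier; this test presumably runs with a deliberately small flush size so that the limit is hit quickly. A sketch of reading the two settings involved (the property names are the standard hbase-site.xml keys, and the defaults shown are the stock ones):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Per-region flush threshold (stock default 128 MB) and the multiplier that
        // turns it into the hard blocking limit behind "Over memstore limit=...".
        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
    }
}

Writes to the region resume once a flush, like the one completing just above, brings the memstore back under this limit.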
2024-11-28T09:22:45,679 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-28T09:22:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-28T09:22:45,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-28T09:22:45,682 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9180 sec 2024-11-28T09:22:45,684 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.9230 sec 2024-11-28T09:22:45,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-28T09:22:45,866 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-28T09:22:45,869 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:45,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-28T09:22:45,870 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:45,871 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:45,871 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:45,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T09:22:45,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T09:22:46,023 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,025 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
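[Aside, not part of the captured log output.] The FLUSH operations that the master turns into FlushTableProcedure/FlushRegionProcedure pairs above (procId 116, then 118 with subprocedure 119) are driven by an ordinary Admin client call, which is what the HBaseAdmin$TableFuture line records. A minimal sketch, assuming default client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Submits the flush to the master; the TableFuture lines above show the
            // client waiting for the resulting procedure (procId 116) to complete.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

Flushing moves the memstore contents into new store files (the .tmp/A, .tmp/B, .tmp/C files committed in the log), which is also what eventually clears the RegionTooBusyException pushback on the writers.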
2024-11-28T09:22:46,026 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:46,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d1471161206e4661855177bd5e7e6409_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785764286/Put/seqid=0 2024-11-28T09:22:46,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742348_1524 (size=12454) 2024-11-28T09:22:46,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,135 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d1471161206e4661855177bd5e7e6409_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d1471161206e4661855177bd5e7e6409_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:46,136 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/bc8e20cf73b64046b882604bc8ed96a5, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:46,137 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/bc8e20cf73b64046b882604bc8ed96a5 is 175, key is test_row_0/A:col10/1732785764286/Put/seqid=0 2024-11-28T09:22:46,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-28T09:22:46,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742349_1525 (size=31255) 2024-11-28T09:22:46,183 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/bc8e20cf73b64046b882604bc8ed96a5 2024-11-28T09:22:46,202 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/21ea40494ef74340b7a5bc9f22e04a4d is 50, key is test_row_0/B:col10/1732785764286/Put/seqid=0 2024-11-28T09:22:46,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742350_1526 (size=12301) 2024-11-28T09:22:46,258 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/21ea40494ef74340b7a5bc9f22e04a4d 2024-11-28T09:22:46,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/346f604799184495be117e7ab4c392e3 is 50, key is test_row_0/C:col10/1732785764286/Put/seqid=0 2024-11-28T09:22:46,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742351_1527 (size=12301) 2024-11-28T09:22:46,338 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/346f604799184495be117e7ab4c392e3 2024-11-28T09:22:46,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/bc8e20cf73b64046b882604bc8ed96a5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5
2024-11-28T09:22:46,349 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5, entries=150, sequenceid=328, filesize=30.5 K
2024-11-28T09:22:46,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/21ea40494ef74340b7a5bc9f22e04a4d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d
2024-11-28T09:22:46,357 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d, entries=150, sequenceid=328, filesize=12.0 K
2024-11-28T09:22:46,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/346f604799184495be117e7ab4c392e3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3
2024-11-28T09:22:46,364 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3, entries=150, sequenceid=328, filesize=12.0 K
2024-11-28T09:22:46,365 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 142d3ce9e2b1d2945b57067dd3e37abc in 339ms, sequenceid=328, compaction requested=true
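The A-family flush above went through the MOB path (a value file under .../mobdir/... plus a ~30.5 K store file of reference cells), while B and C were flushed as ordinary ~12.0 K store files. That difference comes from the column family configuration. A hedged sketch of how a MOB-enabled family is declared; the family and table names match the log, but the threshold is an illustrative assumption, not the test's actual setting:

```java
// Illustrative sketch only: MOB threshold and table-creation context are assumptions.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  static void create(Admin admin) throws java.io.IOException {
    // Values larger than the threshold are written as MOB files under .../mobdir/...,
    // and the regular store file keeps reference cells pointing at them.
    ColumnFamilyDescriptor familyA = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(4 * 1024L) // bytes; illustrative value
        .build();
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(familyA)
        .build();
    admin.createTable(table);
  }
}
```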
2024-11-28T09:22:46,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc:
2024-11-28T09:22:46,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.
2024-11-28T09:22:46,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119
2024-11-28T09:22:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=119
2024-11-28T09:22:46,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118
2024-11-28T09:22:46,374 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 502 msec
2024-11-28T09:22:46,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 506 msec
[2024-11-28T09:22:46,350 through 09:22:46,442: interleaved, repeated DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0/1/2 on port 33819, each reading storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118
2024-11-28T09:22:46,481 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed
2024-11-28T09:22:46,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,482 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees
2024-11-28T09:22:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees
2024-11-28T09:22:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,483 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-28T09:22:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,484 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=120, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-28T09:22:46,484 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-28T09:22:46,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120
2024-11-28T09:22:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:22:46,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120
2024-11-28T09:22:46,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,603 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:46,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:46,604 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:46,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,608 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,613 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,619 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,625 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,630 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,636 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,637 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:46,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting 
region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:46,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:46,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:46,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:46,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:46,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:46,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:22:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,647 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b29d0b10d03b4bae85f94a044de05513_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742353_1529 (size=25158) 2024-11-28T09:22:46,717 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:46,729 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128b29d0b10d03b4bae85f94a044de05513_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b29d0b10d03b4bae85f94a044de05513_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:46,730 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/57afd689faca4ce0adddcfdfac38e819, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:46,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/57afd689faca4ce0adddcfdfac38e819 is 175, key is 
test_row_0/A:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:46,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742352_1528 (size=74794) 2024-11-28T09:22:46,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785826743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T09:22:46,790 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,791 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:46,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:46,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:46,792 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:46,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785826766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,795 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785826771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:46,799 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785826772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,800 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785826774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,889 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785826874, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785826896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785826896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785826900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,908 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:46,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785826901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,948 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:46,948 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:46,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:46,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:46,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:46,949 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:46,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:46,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T09:22:47,101 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:47,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:47,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,102 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,102 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,103 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785827092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785827109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785827109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,122 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785827109, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785827115, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=339, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/57afd689faca4ce0adddcfdfac38e819 2024-11-28T09:22:47,152 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/ef4a8a2b31bf4e2194076df03c43a487 is 50, key is test_row_0/B:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:47,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742354_1530 (size=12301) 2024-11-28T09:22:47,254 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,255 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
as already flushing 2024-11-28T09:22:47,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,255 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,407 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,409 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:47,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,416 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785827406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,416 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785827424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785827425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,429 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785827425, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,447 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785827435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,568 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,569 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:47,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,569 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:47,569 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/ef4a8a2b31bf4e2194076df03c43a487 2024-11-28T09:22:47,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T09:22:47,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/c2324d2bbc30402eb8d9e925317c9538 is 50, key is test_row_0/C:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:47,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742355_1531 (size=12301) 2024-11-28T09:22:47,722 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,877 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:47,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:47,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:47,878 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:47,927 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36174 deadline: 1732785827919, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36142 deadline: 1732785827932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36200 deadline: 1732785827933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,940 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36112 deadline: 1732785827934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:47,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:47,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:36160 deadline: 1732785827962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:48,032 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:48,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:48,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. as already flushing 2024-11-28T09:22:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:48,035 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] handler.RSProcedureHandler(58): pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:48,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=121 java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:48,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=121 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:48,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=339 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/c2324d2bbc30402eb8d9e925317c9538 2024-11-28T09:22:48,063 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/57afd689faca4ce0adddcfdfac38e819 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819 2024-11-28T09:22:48,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819, entries=400, sequenceid=339, filesize=73.0 K 2024-11-28T09:22:48,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/ef4a8a2b31bf4e2194076df03c43a487 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487 2024-11-28T09:22:48,074 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487, entries=150, sequenceid=339, filesize=12.0 K 2024-11-28T09:22:48,075 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/c2324d2bbc30402eb8d9e925317c9538 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538 2024-11-28T09:22:48,079 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538, entries=150, sequenceid=339, filesize=12.0 K 2024-11-28T09:22:48,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 142d3ce9e2b1d2945b57067dd3e37abc in 1477ms, sequenceid=339, compaction requested=true 2024-11-28T09:22:48,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:48,081 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:48,082 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files 
of size 169241 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:48,082 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/A is initiating minor compaction (all files) 2024-11-28T09:22:48,082 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/A in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:48,082 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=165.3 K 2024-11-28T09:22:48,082 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:48,082 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819] 2024-11-28T09:22:48,083 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20e8aee5f7184f5083d86b7c387ea6eb, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:48,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:48,083 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6a8779a2ee3b4cbfb0cc3eaef0986573, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732785763643 2024-11-28T09:22:48,084 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bc8e20cf73b64046b882604bc8ed96a5, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732785764270 2024-11-28T09:22:48,084 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 57afd689faca4ce0adddcfdfac38e819, keycount=400, bloomtype=ROW, size=73.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732785766554 2024-11-28T09:22:48,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:48,085 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:48,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:48,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/B is initiating minor compaction (all files) 2024-11-28T09:22:48,087 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/B in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:48,087 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/51643fb9b1cb4323af09f51ed2570735, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=48.7 K 2024-11-28T09:22:48,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 51643fb9b1cb4323af09f51ed2570735, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:48,089 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fe174329ad44809afed859a08c2ace2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732785763643 2024-11-28T09:22:48,089 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 142d3ce9e2b1d2945b57067dd3e37abc:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:48,090 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:48,090 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 21ea40494ef74340b7a5bc9f22e04a4d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1732785764270 2024-11-28T09:22:48,090 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ef4a8a2b31bf4e2194076df03c43a487, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732785766556 2024-11-28T09:22:48,101 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:48,118 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#B#compaction#451 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:48,119 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/620e87879b7948d9b02d569fc5712df8 is 50, key is test_row_0/B:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:48,129 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128975514ca6b6d496b809e97d6e6a30238_142d3ce9e2b1d2945b57067dd3e37abc store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:48,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128975514ca6b6d496b809e97d6e6a30238_142d3ce9e2b1d2945b57067dd3e37abc, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:48,133 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128975514ca6b6d496b809e97d6e6a30238_142d3ce9e2b1d2945b57067dd3e37abc because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:48,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742356_1532 (size=13119) 2024-11-28T09:22:48,193 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:48,193 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=121 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:48,194 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:48,194 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:48,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742357_1533 (size=4469) 2024-11-28T09:22:48,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112811e77a710c18493a949bb5b405b93424_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785766762/Put/seqid=0 2024-11-28T09:22:48,250 DEBUG [Thread-1937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75e4d3d0 to 127.0.0.1:53251 2024-11-28T09:22:48,250 DEBUG [Thread-1937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,257 DEBUG [Thread-1931 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5aee939b to 127.0.0.1:53251 2024-11-28T09:22:48,257 DEBUG [Thread-1931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,264 DEBUG [Thread-1933 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f49665c to 127.0.0.1:53251 2024-11-28T09:22:48,265 DEBUG [Thread-1933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,266 DEBUG [Thread-1935 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683f8469 to 127.0.0.1:53251 2024-11-28T09:22:48,266 DEBUG [Thread-1935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742358_1534 (size=12454) 2024-11-28T09:22:48,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:22:48,271 DEBUG [Thread-1939 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b308f62 to 127.0.0.1:53251 2024-11-28T09:22:48,271 DEBUG [Thread-1939 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,273 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112811e77a710c18493a949bb5b405b93424_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112811e77a710c18493a949bb5b405b93424_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:48,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/b9b15d89c7f344529bb5a11be523fd94, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:48,275 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/b9b15d89c7f344529bb5a11be523fd94 is 175, key is test_row_0/A:col10/1732785766762/Put/seqid=0 2024-11-28T09:22:48,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742359_1535 (size=31255) 2024-11-28T09:22:48,588 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/620e87879b7948d9b02d569fc5712df8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/620e87879b7948d9b02d569fc5712df8 2024-11-28T09:22:48,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T09:22:48,593 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/B of 142d3ce9e2b1d2945b57067dd3e37abc into 620e87879b7948d9b02d569fc5712df8(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:48,593 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:48,593 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/B, priority=12, startTime=1732785768085; duration=0sec 2024-11-28T09:22:48,594 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:48,594 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:B 2024-11-28T09:22:48,594 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:22:48,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:22:48,595 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 142d3ce9e2b1d2945b57067dd3e37abc/C is initiating minor compaction (all files) 2024-11-28T09:22:48,595 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 142d3ce9e2b1d2945b57067dd3e37abc/C in TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:48,596 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3caef55a5afa4ab8971c89fcb3b99440, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp, totalSize=48.7 K 2024-11-28T09:22:48,596 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3caef55a5afa4ab8971c89fcb3b99440, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1732785763548 2024-11-28T09:22:48,596 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b4e7aa95b1b44a49098adfaf219891b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=313, earliestPutTs=1732785763643 2024-11-28T09:22:48,597 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 346f604799184495be117e7ab4c392e3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=328, earliestPutTs=1732785764270 2024-11-28T09:22:48,597 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c2324d2bbc30402eb8d9e925317c9538, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=339, earliestPutTs=1732785766556 2024-11-28T09:22:48,612 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#A#compaction#450 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:48,613 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/431bca0f62074e46997141da854b1ed8 is 175, key is test_row_0/A:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:48,616 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 142d3ce9e2b1d2945b57067dd3e37abc#C#compaction#453 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:48,616 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/4c2fc923fe8c42df8ce756385f808d26 is 50, key is test_row_0/C:col10/1732785766559/Put/seqid=0 2024-11-28T09:22:48,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742360_1536 (size=32073) 2024-11-28T09:22:48,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742361_1537 (size=13119) 2024-11-28T09:22:48,655 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/431bca0f62074e46997141da854b1ed8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/431bca0f62074e46997141da854b1ed8 2024-11-28T09:22:48,661 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/A of 142d3ce9e2b1d2945b57067dd3e37abc into 431bca0f62074e46997141da854b1ed8(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:48,661 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:48,661 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/A, priority=12, startTime=1732785768081; duration=0sec 2024-11-28T09:22:48,662 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:48,662 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:A 2024-11-28T09:22:48,710 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/b9b15d89c7f344529bb5a11be523fd94 2024-11-28T09:22:48,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7a22224c95d8463fba33c50c273afe43 is 50, key is test_row_0/B:col10/1732785766762/Put/seqid=0 2024-11-28T09:22:48,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742362_1538 (size=12301) 2024-11-28T09:22:48,755 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7a22224c95d8463fba33c50c273afe43 2024-11-28T09:22:48,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/27435382116144e2bd59b474557296dc is 50, key is test_row_0/C:col10/1732785766762/Put/seqid=0 2024-11-28T09:22:48,799 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742363_1539 (size=12301) 2024-11-28T09:22:48,799 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/27435382116144e2bd59b474557296dc 2024-11-28T09:22:48,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/b9b15d89c7f344529bb5a11be523fd94 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/b9b15d89c7f344529bb5a11be523fd94 2024-11-28T09:22:48,807 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/b9b15d89c7f344529bb5a11be523fd94, entries=150, sequenceid=364, filesize=30.5 K 2024-11-28T09:22:48,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/7a22224c95d8463fba33c50c273afe43 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7a22224c95d8463fba33c50c273afe43 2024-11-28T09:22:48,813 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7a22224c95d8463fba33c50c273afe43, entries=150, sequenceid=364, filesize=12.0 K 2024-11-28T09:22:48,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/27435382116144e2bd59b474557296dc as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/27435382116144e2bd59b474557296dc 2024-11-28T09:22:48,820 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/27435382116144e2bd59b474557296dc, entries=150, sequenceid=364, filesize=12.0 K 2024-11-28T09:22:48,821 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for 142d3ce9e2b1d2945b57067dd3e37abc in 627ms, sequenceid=364, compaction requested=false 2024-11-28T09:22:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.HRegion(2538): Flush status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 
2024-11-28T09:22:48,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-28T09:22:48,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=121 2024-11-28T09:22:48,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-28T09:22:48,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3380 sec 2024-11-28T09:22:48,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=120, table=TestAcidGuarantees in 2.3420 sec 2024-11-28T09:22:48,931 DEBUG [Thread-1926 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3dd5b441 to 127.0.0.1:53251 2024-11-28T09:22:48,931 DEBUG [Thread-1926 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,940 DEBUG [Thread-1922 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7b6cf8cb to 127.0.0.1:53251 2024-11-28T09:22:48,940 DEBUG [Thread-1922 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,946 DEBUG [Thread-1928 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3c336ea4 to 127.0.0.1:53251 2024-11-28T09:22:48,946 DEBUG [Thread-1928 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,949 DEBUG [Thread-1920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b82ba2a to 127.0.0.1:53251 2024-11-28T09:22:48,949 DEBUG [Thread-1920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:48,968 DEBUG [Thread-1924 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7ec15031 to 127.0.0.1:53251 2024-11-28T09:22:48,968 DEBUG [Thread-1924 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:49,063 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/4c2fc923fe8c42df8ce756385f808d26 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/4c2fc923fe8c42df8ce756385f808d26 2024-11-28T09:22:49,068 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 142d3ce9e2b1d2945b57067dd3e37abc/C of 142d3ce9e2b1d2945b57067dd3e37abc into 4c2fc923fe8c42df8ce756385f808d26(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:49,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:49,068 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc., storeName=142d3ce9e2b1d2945b57067dd3e37abc/C, priority=12, startTime=1732785768085; duration=0sec 2024-11-28T09:22:49,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:49,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 142d3ce9e2b1d2945b57067dd3e37abc:C 2024-11-28T09:22:50,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-28T09:22:50,594 INFO [Thread-1930 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 56 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 53 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 44 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 86 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1496 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4488 rows 2024-11-28T09:22:50,594 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1498 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4494 rows 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1498 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4494 rows 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1495 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4485 rows 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1504 2024-11-28T09:22:50,595 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 4512 rows 2024-11-28T09:22:50,595 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:22:50,595 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b976e1a to 127.0.0.1:53251 2024-11-28T09:22:50,595 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:22:50,604 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T09:22:50,608 
INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T09:22:50,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:50,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:50,612 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785770612"}]},"ts":"1732785770612"} 2024-11-28T09:22:50,614 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:22:50,616 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:22:50,617 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:22:50,619 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, UNASSIGN}] 2024-11-28T09:22:50,619 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=124, ppid=123, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, UNASSIGN 2024-11-28T09:22:50,620 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:50,621 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:22:50,621 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE; CloseRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:50,773 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:50,773 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(124): Close 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:50,773 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1681): Closing 142d3ce9e2b1d2945b57067dd3e37abc, disabling compactions & flushes 2024-11-28T09:22:50,774 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1703): Closing region 
TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. after waiting 0 ms 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:50,774 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(2837): Flushing 142d3ce9e2b1d2945b57067dd3e37abc 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=A 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=B 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 142d3ce9e2b1d2945b57067dd3e37abc, store=C 2024-11-28T09:22:50,774 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:50,780 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112886509f9fcba946e4ab7a93ac2b14c6c1_142d3ce9e2b1d2945b57067dd3e37abc is 50, key is test_row_0/A:col10/1732785768967/Put/seqid=0 2024-11-28T09:22:50,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742364_1540 (size=12454) 2024-11-28T09:22:50,786 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:50,789 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112886509f9fcba946e4ab7a93ac2b14c6c1_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886509f9fcba946e4ab7a93ac2b14c6c1_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:50,790 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8026d744a2fc4820bb6931a99a8dc9e8, store: [table=TestAcidGuarantees family=A region=142d3ce9e2b1d2945b57067dd3e37abc] 2024-11-28T09:22:50,791 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8026d744a2fc4820bb6931a99a8dc9e8 is 175, key is test_row_0/A:col10/1732785768967/Put/seqid=0 2024-11-28T09:22:50,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742365_1541 (size=31255) 2024-11-28T09:22:50,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:51,196 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=375, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8026d744a2fc4820bb6931a99a8dc9e8 2024-11-28T09:22:51,202 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/10db97df2c90488a9caa8f4b76d90bc7 is 50, key is test_row_0/B:col10/1732785768967/Put/seqid=0 2024-11-28T09:22:51,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742366_1542 (size=12301) 2024-11-28T09:22:51,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:51,607 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/10db97df2c90488a9caa8f4b76d90bc7 2024-11-28T09:22:51,613 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0bfb0e07eb8347d6a78dd6f1062ec23f is 50, key is test_row_0/C:col10/1732785768967/Put/seqid=0 2024-11-28T09:22:51,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742367_1543 (size=12301) 2024-11-28T09:22:51,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:52,017 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=375 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0bfb0e07eb8347d6a78dd6f1062ec23f 2024-11-28T09:22:52,022 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/A/8026d744a2fc4820bb6931a99a8dc9e8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8026d744a2fc4820bb6931a99a8dc9e8 2024-11-28T09:22:52,026 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8026d744a2fc4820bb6931a99a8dc9e8, entries=150, sequenceid=375, filesize=30.5 K 2024-11-28T09:22:52,026 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/B/10db97df2c90488a9caa8f4b76d90bc7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/10db97df2c90488a9caa8f4b76d90bc7 2024-11-28T09:22:52,031 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/10db97df2c90488a9caa8f4b76d90bc7, entries=150, sequenceid=375, filesize=12.0 K 2024-11-28T09:22:52,032 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/.tmp/C/0bfb0e07eb8347d6a78dd6f1062ec23f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0bfb0e07eb8347d6a78dd6f1062ec23f 2024-11-28T09:22:52,037 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0bfb0e07eb8347d6a78dd6f1062ec23f, entries=150, sequenceid=375, filesize=12.0 K 2024-11-28T09:22:52,037 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for 142d3ce9e2b1d2945b57067dd3e37abc in 1263ms, sequenceid=375, compaction requested=true 2024-11-28T09:22:52,044 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819] to archive 2024-11-28T09:22:52,055 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:22:52,067 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f07a8b5e93ff4deda21b8e465a99d477 2024-11-28T09:22:52,069 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/fbff41a849984052b04b647d54f31255 2024-11-28T09:22:52,070 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c8e86c4b7c984ffd975bcf2efd887784 2024-11-28T09:22:52,071 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c0b466dab6014952b381e78136dc6a32 2024-11-28T09:22:52,073 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8e63850c7286463da5a4e999f9bd18ee 2024-11-28T09:22:52,074 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f6ad92541d9745e380ad0b4cb3591575 2024-11-28T09:22:52,076 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/d576e47544724479abff7d83f7e68590 2024-11-28T09:22:52,077 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/f91a499d276a4003a634dc506d93facd 2024-11-28T09:22:52,079 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0c043ed9e79b4fdb96c5264ae8185931 2024-11-28T09:22:52,080 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6403a38949e34719857556201c22a871 2024-11-28T09:22:52,081 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c13d4b47a73e46a48f4de8a59a41ef33 2024-11-28T09:22:52,083 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/c3f554a1f28840378dcbbd0cdf032326 2024-11-28T09:22:52,085 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/48097a7d0c4a4569986dd944c514cf2a 2024-11-28T09:22:52,086 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/1e24d9b7f17b4113b44e51249c984fe2 2024-11-28T09:22:52,087 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/0ae28d0e11e24c82aaffaff7240dc005 2024-11-28T09:22:52,089 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8597c5b5246d46a0ba2acec4b0617e9f 2024-11-28T09:22:52,091 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4930defe2d094768b1c5799820dab4a7 2024-11-28T09:22:52,092 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/a835524f570149218c69a1c2aa42c098 2024-11-28T09:22:52,093 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/80db28c3706f49c3b714ced395221966 2024-11-28T09:22:52,095 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/20e8aee5f7184f5083d86b7c387ea6eb 2024-11-28T09:22:52,096 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/4ee7320b595a4a5197300ceccbd16f47 2024-11-28T09:22:52,097 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/6a8779a2ee3b4cbfb0cc3eaef0986573 2024-11-28T09:22:52,098 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/bc8e20cf73b64046b882604bc8ed96a5 2024-11-28T09:22:52,100 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/57afd689faca4ce0adddcfdfac38e819 2024-11-28T09:22:52,116 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/546b38a4c399464bb0981a273c8b80d6, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/edfdfd1bf3364618b5a3611780130a48, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/376a3666bbfa40e68d40b0c944620255, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/af2b9a1e49ab4baeaca6b9ff982c9707, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/d791731dbde740daaecc087694cfa11d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/51643fb9b1cb4323af09f51ed2570735, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487] to archive 2024-11-28T09:22:52,117 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:22:52,119 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/909969ad3f9f44c791011cc764319ed2 2024-11-28T09:22:52,121 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/88e7d440064c4b96bc12d1f2ffb0c169 2024-11-28T09:22:52,122 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/546b38a4c399464bb0981a273c8b80d6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/546b38a4c399464bb0981a273c8b80d6 2024-11-28T09:22:52,124 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/f2d7c81b54594d8e9f18e931ece1166d 2024-11-28T09:22:52,125 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7961d4033b0c43f8be5154936fbacb3f 2024-11-28T09:22:52,126 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/edfdfd1bf3364618b5a3611780130a48 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/edfdfd1bf3364618b5a3611780130a48 2024-11-28T09:22:52,127 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/4ca81648963b408682ed9e76242222d4 2024-11-28T09:22:52,129 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/5638a288bccb46b2b83e8cde211510cd 2024-11-28T09:22:52,130 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/376a3666bbfa40e68d40b0c944620255 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/376a3666bbfa40e68d40b0c944620255 2024-11-28T09:22:52,131 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/00e0d34660104a85a6c1c13576666f73 2024-11-28T09:22:52,133 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/16c7fbf3ef574e68a26b492dc1ffa58e 2024-11-28T09:22:52,135 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/a39cff7cf5c34c849ab16d39679b3603 2024-11-28T09:22:52,136 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/af2b9a1e49ab4baeaca6b9ff982c9707 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/af2b9a1e49ab4baeaca6b9ff982c9707 2024-11-28T09:22:52,138 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/184cf1c872ef40219e68d88846704405 2024-11-28T09:22:52,140 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/e95dc8cf38b347799acd73090c09520a 2024-11-28T09:22:52,142 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/0acfbd881d5140bba34928d54b88b2be 2024-11-28T09:22:52,143 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/d791731dbde740daaecc087694cfa11d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/d791731dbde740daaecc087694cfa11d 2024-11-28T09:22:52,144 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/29596a67de954defbbfa38c3d79ee62c 2024-11-28T09:22:52,145 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/150f84c4cc384e10b4652a8bca633a11 2024-11-28T09:22:52,146 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/51643fb9b1cb4323af09f51ed2570735 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/51643fb9b1cb4323af09f51ed2570735 2024-11-28T09:22:52,148 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/9dc50e1cfd7b4ba29e9e8febf98d7e72 2024-11-28T09:22:52,149 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/3fe174329ad44809afed859a08c2ace2 2024-11-28T09:22:52,151 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/21ea40494ef74340b7a5bc9f22e04a4d 2024-11-28T09:22:52,152 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/ef4a8a2b31bf4e2194076df03c43a487 2024-11-28T09:22:52,154 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/ee1b5b1ee1f749f9b0506a3985af4d2d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/637e913db3834921bc81f7db702dd62f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/b4026b09bf2242819362a6399e7f1d78, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/168985dab35343cda97445b62c392323, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/a19f92693b1f4124bcae1d3c4f5b42fe, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3caef55a5afa4ab8971c89fcb3b99440, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538] to archive 2024-11-28T09:22:52,155 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:22:52,158 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/34028b9959c64cea9b800d9b9ae722aa 2024-11-28T09:22:52,159 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/7309b2c8997c48808e7a634cf3a23b6c 2024-11-28T09:22:52,161 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/ee1b5b1ee1f749f9b0506a3985af4d2d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/ee1b5b1ee1f749f9b0506a3985af4d2d 2024-11-28T09:22:52,162 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cc0cfa95b90040fdbe1d7e262693829b 2024-11-28T09:22:52,163 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d3a54be57f19461981e9637f94a2468e 2024-11-28T09:22:52,164 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/637e913db3834921bc81f7db702dd62f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/637e913db3834921bc81f7db702dd62f 2024-11-28T09:22:52,168 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0c37760fe8014ee493983c128709d3e2 2024-11-28T09:22:52,169 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/907f7a5605db4fd4a05d8ddc1610d455 2024-11-28T09:22:52,170 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/b4026b09bf2242819362a6399e7f1d78 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/b4026b09bf2242819362a6399e7f1d78 2024-11-28T09:22:52,171 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0fdb5d0e62e04065bab05d56402a2414 2024-11-28T09:22:52,172 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/6ccc74b3cb584030aea277e1ee35c95c 2024-11-28T09:22:52,175 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/142eb7924d4f400da01063568ccc8d1a 2024-11-28T09:22:52,176 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/168985dab35343cda97445b62c392323 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/168985dab35343cda97445b62c392323 2024-11-28T09:22:52,177 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/9c83ee1ba338496cbcd53d283c7c4081 2024-11-28T09:22:52,178 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/e9e81dad42b148a3947a26f4c0ac47a4 2024-11-28T09:22:52,181 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3d66049a42754c4093041ba6623befaf 2024-11-28T09:22:52,182 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/a19f92693b1f4124bcae1d3c4f5b42fe to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/a19f92693b1f4124bcae1d3c4f5b42fe 2024-11-28T09:22:52,183 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/d8df95b7347a43628914939542a45fae 2024-11-28T09:22:52,185 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/8061949c9f3e40cea2d04dff8e73580f 2024-11-28T09:22:52,186 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3caef55a5afa4ab8971c89fcb3b99440 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3caef55a5afa4ab8971c89fcb3b99440 2024-11-28T09:22:52,187 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/cf2132c536f943d4819b7b670930d9c3 2024-11-28T09:22:52,189 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/3b4e7aa95b1b44a49098adfaf219891b 2024-11-28T09:22:52,190 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/346f604799184495be117e7ab4c392e3 2024-11-28T09:22:52,192 DEBUG [StoreCloser-TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/c2324d2bbc30402eb8d9e925317c9538 2024-11-28T09:22:52,219 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits/378.seqid, newMaxSeqId=378, maxSeqId=4 2024-11-28T09:22:52,220 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc. 2024-11-28T09:22:52,220 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] regionserver.HRegion(1635): Region close journal for 142d3ce9e2b1d2945b57067dd3e37abc: 2024-11-28T09:22:52,222 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=125}] handler.UnassignRegionHandler(170): Closed 142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,223 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=124 updating hbase:meta row=142d3ce9e2b1d2945b57067dd3e37abc, regionState=CLOSED 2024-11-28T09:22:52,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-11-28T09:22:52,225 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseRegionProcedure 142d3ce9e2b1d2945b57067dd3e37abc, server=363d8d38a970,33819,1732785660637 in 1.6030 sec 2024-11-28T09:22:52,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=124, resume processing ppid=123 2024-11-28T09:22:52,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, ppid=123, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=142d3ce9e2b1d2945b57067dd3e37abc, UNASSIGN in 1.6070 sec 2024-11-28T09:22:52,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-28T09:22:52,229 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.6100 sec 2024-11-28T09:22:52,230 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785772230"}]},"ts":"1732785772230"} 2024-11-28T09:22:52,231 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:22:52,233 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:22:52,235 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.6260 sec 2024-11-28T09:22:52,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122 2024-11-28T09:22:52,717 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 122 completed 2024-11-28T09:22:52,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:22:52,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,722 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=126, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,722 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=126, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T09:22:52,758 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,762 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits] 2024-11-28T09:22:52,768 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/431bca0f62074e46997141da854b1ed8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/431bca0f62074e46997141da854b1ed8 2024-11-28T09:22:52,771 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8026d744a2fc4820bb6931a99a8dc9e8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/8026d744a2fc4820bb6931a99a8dc9e8 2024-11-28T09:22:52,773 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/b9b15d89c7f344529bb5a11be523fd94 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/A/b9b15d89c7f344529bb5a11be523fd94 2024-11-28T09:22:52,775 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/10db97df2c90488a9caa8f4b76d90bc7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/10db97df2c90488a9caa8f4b76d90bc7 2024-11-28T09:22:52,777 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/620e87879b7948d9b02d569fc5712df8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/620e87879b7948d9b02d569fc5712df8 2024-11-28T09:22:52,778 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7a22224c95d8463fba33c50c273afe43 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/B/7a22224c95d8463fba33c50c273afe43 2024-11-28T09:22:52,782 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0bfb0e07eb8347d6a78dd6f1062ec23f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/0bfb0e07eb8347d6a78dd6f1062ec23f 2024-11-28T09:22:52,784 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/27435382116144e2bd59b474557296dc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/27435382116144e2bd59b474557296dc 2024-11-28T09:22:52,786 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/4c2fc923fe8c42df8ce756385f808d26 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/C/4c2fc923fe8c42df8ce756385f808d26 2024-11-28T09:22:52,790 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits/378.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc/recovered.edits/378.seqid 2024-11-28T09:22:52,792 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,792 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:22:52,793 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:22:52,794 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T09:22:52,799 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d167268465d4a2f84d548aa2d3a3c89_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d167268465d4a2f84d548aa2d3a3c89_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,801 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128101808ec220f432ba75412a8fdaf5e34_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128101808ec220f432ba75412a8fdaf5e34_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,803 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112811e77a710c18493a949bb5b405b93424_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112811e77a710c18493a949bb5b405b93424_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,805 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128195f586fcc7f4d968ee88d8cad040278_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128195f586fcc7f4d968ee88d8cad040278_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,807 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282372dc64ab9641bbb258a69a1bcc08cf_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282372dc64ab9641bbb258a69a1bcc08cf_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,809 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128356e512c37cd4fa3852546f028a69a7b_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128356e512c37cd4fa3852546f028a69a7b_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,810 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861cc2ceb5cdc48afa48c48d31b44cd71_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112861cc2ceb5cdc48afa48c48d31b44cd71_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,815 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112866b71ad7ac144b1aa103736dc0cd9316_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112866b71ad7ac144b1aa103736dc0cd9316_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,816 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286a0f2f5cf9eb4c0391f42140877ff21b_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286a0f2f5cf9eb4c0391f42140877ff21b_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,817 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ee33d7a911d48b0847546dec785a533_142d3ce9e2b1d2945b57067dd3e37abc to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286ee33d7a911d48b0847546dec785a533_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,819 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128786464ad1ffc43499561bb9a9126f6be_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128786464ad1ffc43499561bb9a9126f6be_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,821 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886509f9fcba946e4ab7a93ac2b14c6c1_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112886509f9fcba946e4ab7a93ac2b14c6c1_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,822 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112890c8b116dce24af4b36574aed387c44e_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112890c8b116dce24af4b36574aed387c44e_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T09:22:52,824 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289900884d052743f2aef8ab3ad8e10166_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289900884d052743f2aef8ab3ad8e10166_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,825 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ff28590b9294e5bb3dbc215bcdabd34_142d3ce9e2b1d2945b57067dd3e37abc to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ff28590b9294e5bb3dbc215bcdabd34_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,827 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b29d0b10d03b4bae85f94a044de05513_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128b29d0b10d03b4bae85f94a044de05513_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,828 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c09beb7b18a346eab98bed0ba918094c_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c09beb7b18a346eab98bed0ba918094c_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,830 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cbb59c52858a477ab4631a67ce2ef6f1_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128cbb59c52858a477ab4631a67ce2ef6f1_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,831 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d1471161206e4661855177bd5e7e6409_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128d1471161206e4661855177bd5e7e6409_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,832 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e215dfb9d5e84b93b45836be3511ef64_142d3ce9e2b1d2945b57067dd3e37abc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128e215dfb9d5e84b93b45836be3511ef64_142d3ce9e2b1d2945b57067dd3e37abc 2024-11-28T09:22:52,833 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:22:52,835 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=126, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,837 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:22:52,840 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T09:22:52,841 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=126, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,841 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:22:52,841 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785772841"}]},"ts":"9223372036854775807"} 2024-11-28T09:22:52,846 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:22:52,846 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 142d3ce9e2b1d2945b57067dd3e37abc, NAME => 'TestAcidGuarantees,,1732785746236.142d3ce9e2b1d2945b57067dd3e37abc.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:22:52,846 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T09:22:52,846 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785772846"}]},"ts":"9223372036854775807"} 2024-11-28T09:22:52,848 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:22:52,855 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=126, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:52,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 135 msec 2024-11-28T09:22:53,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=126 2024-11-28T09:22:53,024 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 126 completed 2024-11-28T09:22:53,036 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=241 (was 241), OpenFileDescriptor=460 (was 458) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=689 (was 594) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=4212 (was 4376) 2024-11-28T09:22:53,048 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=241, OpenFileDescriptor=460, MaxFileDescriptor=1048576, SystemLoadAverage=689, ProcessCount=11, AvailableMemoryMB=4212 2024-11-28T09:22:53,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T09:22:53,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:22:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:22:53,051 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:22:53,051 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:53,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 127 2024-11-28T09:22:53,052 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:22:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-28T09:22:53,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742368_1544 (size=960) 2024-11-28T09:22:53,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-28T09:22:53,354 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-28T09:22:53,461 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:22:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742369_1545 (size=53) 2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing f65103e784e3773002ae1a64e3eece97, disabling compactions & flushes 2024-11-28T09:22:53,473 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. after waiting 0 ms 2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:53,473 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
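The entries above cover the whole client-visible lifecycle for this test: the previous TestAcidGuarantees table is disabled (DisableTableProcedure, pid=122), deleted with every store file relocated under archive/ (DeleteTableProcedure, pid=126), and then recreated (CreateTableProcedure, pid=127) with column families A, B and C, VERSIONS => '1', and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. The sketch below shows roughly equivalent HBase 2.x Admin calls; the table name, family names and attribute key are taken from the log, while the class name, connection handling and everything else is illustrative and not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative only: drops and recreates the table the way procedures 122/126/127 do in the log.
    public final class RecreateAcidTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();            // reads hbase-site.xml from the classpath
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (admin.tableExists(table)) {
            admin.disableTable(table);                               // DisableTableProcedure in the log
            admin.deleteTable(table);                                // DeleteTableProcedure: regions archived, meta rows removed
          }
          TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
              // Table-level metadata seen in the descriptor dump above.
              .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1)                                   // VERSIONS => '1' in the descriptor
                .build());
          }
          admin.createTable(builder.build());                        // CreateTableProcedure
        }
      }
    }

The same in-memory compaction behaviour can also be requested per family with ColumnFamilyDescriptorBuilder.setInMemoryCompaction(MemoryCompactionPolicy.BASIC); the descriptor dumped in the log carries it as table metadata instead, which is consistent with the store opener below reporting memstore type=CompactingMemStore with compactor=BASIC.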
2024-11-28T09:22:53,473 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:53,474 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:22:53,474 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785773474"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785773474"}]},"ts":"1732785773474"} 2024-11-28T09:22:53,475 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-28T09:22:53,476 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:22:53,476 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785773476"}]},"ts":"1732785773476"} 2024-11-28T09:22:53,477 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:22:53,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, ASSIGN}] 2024-11-28T09:22:53,483 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, ASSIGN 2024-11-28T09:22:53,484 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=128, ppid=127, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:22:53,635 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=f65103e784e3773002ae1a64e3eece97, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:53,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; OpenRegionProcedure f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:22:53,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-28T09:22:53,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:53,793 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
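While the master walks through CREATE_TABLE_WRITE_FS_LAYOUT, CREATE_TABLE_ADD_TO_META and the assignment subprocedures above (pid=128/129), the client that issued the create simply polls MasterRpcServices(1305) — the repeated "Checking to see if procedure is done pid=127" entries — until the procedure completes, which surfaces further down as "Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed" from HBaseAdmin's TableFuture. A rough equivalent of that future-based wait using the public Admin API might look like the sketch below; the timeout value and the extra availability check are assumptions, not something the test does verbatim.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.TableDescriptor;

    public final class CreateTableAndWaitSketch {
      // Submit the create and block until the master-side procedure finishes,
      // mirroring the "Checking to see if procedure is done pid=..." polling in the log.
      static void createAndWait(Admin admin, TableDescriptor td) throws Exception {
        admin.createTableAsync(td).get(5, TimeUnit.MINUTES);   // future completes when the procedure reaches SUCCESS
        TableName name = td.getTableName();
        if (!admin.isTableAvailable(name)) {                   // every region assigned and OPEN, as pid=128/129 do above
          throw new IllegalStateException(name + " not available after create");
        }
      }
    }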
2024-11-28T09:22:53,793 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7285): Opening region: {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:22:53,793 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,793 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:22:53,794 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7327): checking encryption for f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,794 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(7330): checking classloading for f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,795 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,797 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:53,798 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f65103e784e3773002ae1a64e3eece97 columnFamilyName A 2024-11-28T09:22:53,798 DEBUG [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:53,799 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(327): Store=f65103e784e3773002ae1a64e3eece97/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:53,799 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,813 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:53,813 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f65103e784e3773002ae1a64e3eece97 columnFamilyName B 2024-11-28T09:22:53,813 DEBUG [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:53,817 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(327): Store=f65103e784e3773002ae1a64e3eece97/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:53,817 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,822 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:22:53,822 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f65103e784e3773002ae1a64e3eece97 columnFamilyName C 2024-11-28T09:22:53,822 DEBUG [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:22:53,824 INFO [StoreOpener-f65103e784e3773002ae1a64e3eece97-1 {}] regionserver.HStore(327): Store=f65103e784e3773002ae1a64e3eece97/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:22:53,824 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:53,826 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,827 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,829 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:22:53,833 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1085): writing seq id for f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:53,841 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:22:53,841 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1102): Opened f65103e784e3773002ae1a64e3eece97; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69498514, jitterRate=0.03560855984687805}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:22:53,842 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegion(1001): Region open journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:53,843 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., pid=129, masterSystemTime=1732785773789 2024-11-28T09:22:53,845 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:53,845 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=129}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
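Everything below this point is the write phase of testScanAtomicity: writer threads put rows such as test_row_0 with qualifier col10 into families A, B and C, the MemStoreFlusher drains each CompactingMemStore into .tmp HFiles, and callers that arrive while the region is over its blocking limit are rejected with RegionTooBusyException ("Over memstore limit=512.0 K" — consistent with the 131072-byte flush size flagged by the WARN at the top of this test multiplied by the default hbase.hregion.memstore.block.multiplier of 4). A deliberately simple writer with an explicit back-off on that exception is sketched below; the payload, retry count and sleep are illustrative, and a stock client configured with retries would normally absorb these failures internally anyway.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidWriterSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          byte[] row = Bytes.toBytes("test_row_0");               // row key seen in the flush entries below
          byte[] value = new byte[32];                            // illustrative payload
          for (int attempt = 0; attempt < 5; attempt++) {
            Put put = new Put(row);
            for (String family : new String[] { "A", "B", "C" }) {
              put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), value);
            }
            try {
              table.put(put);                                     // may surface RegionTooBusyException under pressure,
              break;                                              // depending on the client's retry settings
            } catch (RegionTooBusyException e) {
              // region is over its blocking memstore limit; back off and let the MemStoreFlusher catch up
              Thread.sleep(100L * (attempt + 1));
            }
          }
        }
      }
    }

The "flush TestAcidGuarantees" request handled by HMaster$22 and FlushTableProcedure pid=130 below corresponds to an Admin.flush(TableName.valueOf("TestAcidGuarantees")) call on the client side; because the region reports "NOT flushing ... as already flushing", the remote FlushRegionCallable fails with "Unable to complete flush" and the master re-dispatches pid=131 to the region server, as the later entries show.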
2024-11-28T09:22:53,845 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=128 updating hbase:meta row=f65103e784e3773002ae1a64e3eece97, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:22:53,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128 2024-11-28T09:22:53,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; OpenRegionProcedure f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 in 210 msec 2024-11-28T09:22:53,849 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=128, resume processing ppid=127 2024-11-28T09:22:53,850 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, ppid=127, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, ASSIGN in 366 msec 2024-11-28T09:22:53,850 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:22:53,851 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785773851"}]},"ts":"1732785773851"} 2024-11-28T09:22:53,852 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:22:53,856 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=127, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:22:53,857 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 807 msec 2024-11-28T09:22:54,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=127 2024-11-28T09:22:54,156 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 127 completed 2024-11-28T09:22:54,158 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3512017b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@301741f1 2024-11-28T09:22:54,195 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22a6e9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,204 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,205 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32956, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,207 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:22:54,208 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48852, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:22:54,209 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x695c2253 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@63cefe40 2024-11-28T09:22:54,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32c12a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,242 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7177efc9 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@65df2359 2024-11-28T09:22:54,282 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ef40578, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,283 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61d38088 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7d0ab200 2024-11-28T09:22:54,298 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32bb71c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,299 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7043f683 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5871c039 2024-11-28T09:22:54,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6bc0f7c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,303 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2b0c2472 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7daa5922 2024-11-28T09:22:54,308 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b8b6e04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,309 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b7f20c4 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bc486e1 2024-11-28T09:22:54,315 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11193a0c, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5f7c40ba to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2070263a 2024-11-28T09:22:54,320 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7861b162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,321 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x41b0e7b6 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6050584c 2024-11-28T09:22:54,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@154f0f85, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,328 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0f2423f3 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dd48863 2024-11-28T09:22:54,332 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8a917b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,333 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x184771cf to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51196534 2024-11-28T09:22:54,340 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54c2725, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:22:54,360 DEBUG [hconnection-0x6b1cf8f7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,363 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:54,385 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:54,386 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:54,391 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:54,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-28T09:22:54,393 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:54,393 DEBUG [hconnection-0x36f00bae-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,394 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:54,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:54,395 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T09:22:54,397 DEBUG [hconnection-0x6589b121-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,398 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32982, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785834418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785834418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785834418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,419 DEBUG [hconnection-0x3150f53d-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,420 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32988, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,426 DEBUG [hconnection-0x38871bf3-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,428 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33002, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,438 DEBUG [hconnection-0xfc4a02c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,440 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,471 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/068defc936dd4b1b8d51e134b7819ed5 is 50, key is test_row_0/A:col10/1732785774383/Put/seqid=0 2024-11-28T09:22:54,471 DEBUG [hconnection-0x61801db7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,472 DEBUG [hconnection-0x339d9c4c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,473 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33014, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,474 INFO 
[RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33028, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,475 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785834475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,480 DEBUG [hconnection-0x735d49b1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,481 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,483 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785834483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T09:22:54,501 DEBUG [hconnection-0xc65765f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:22:54,503 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33040, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:22:54,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785834520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785834520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,522 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785834522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,547 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,547 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-28T09:22:54,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:54,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:54,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:54,547 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,577 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785834576, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742370_1546 (size=12001) 2024-11-28T09:22:54,586 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785834586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,587 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/068defc936dd4b1b8d51e134b7819ed5 2024-11-28T09:22:54,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/619a8fc76e864554b47d6789c813d70c is 50, key is test_row_0/B:col10/1732785774383/Put/seqid=0 2024-11-28T09:22:54,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742371_1547 (size=12001) 2024-11-28T09:22:54,684 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/619a8fc76e864554b47d6789c813d70c 2024-11-28T09:22:54,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T09:22:54,700 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,701 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-28T09:22:54,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:54,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
as already flushing 2024-11-28T09:22:54,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:54,701 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,701 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:54,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785834723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785834724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785834724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,726 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/be08734d29dd4c13bc0db6d287f9e402 is 50, key is test_row_0/C:col10/1732785774383/Put/seqid=0 2024-11-28T09:22:54,779 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785834779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742372_1548 (size=12001) 2024-11-28T09:22:54,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/be08734d29dd4c13bc0db6d287f9e402 2024-11-28T09:22:54,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/068defc936dd4b1b8d51e134b7819ed5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5 2024-11-28T09:22:54,789 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:54,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785834788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:22:54,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/619a8fc76e864554b47d6789c813d70c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c 2024-11-28T09:22:54,805 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:22:54,807 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/be08734d29dd4c13bc0db6d287f9e402 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402 2024-11-28T09:22:54,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402, entries=150, sequenceid=12, filesize=11.7 K 2024-11-28T09:22:54,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for f65103e784e3773002ae1a64e3eece97 in 428ms, sequenceid=12, compaction requested=false 2024-11-28T09:22:54,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:54,853 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:54,854 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:54,854 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:54,854 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:54,855 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:54,863 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/10174d2fe7164d9c9c9d7efe37abc283 is 50, key is test_row_0/A:col10/1732785774416/Put/seqid=0 2024-11-28T09:22:54,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742373_1549 (size=12001) 2024-11-28T09:22:54,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T09:22:55,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:55,029 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:55,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785835033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,034 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785835033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,035 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785835034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785835082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,093 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785835091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,140 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785835135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785835136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785835136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,268 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/10174d2fe7164d9c9c9d7efe37abc283 2024-11-28T09:22:55,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/866691b9dbb545418783e6794dbd6970 is 50, key is test_row_0/B:col10/1732785774416/Put/seqid=0 2024-11-28T09:22:55,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742374_1550 (size=12001) 2024-11-28T09:22:55,328 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/866691b9dbb545418783e6794dbd6970 2024-11-28T09:22:55,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/45527bbe9f2541cd868f8a6386285020 is 50, key is 
test_row_0/C:col10/1732785774416/Put/seqid=0 2024-11-28T09:22:55,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785835342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785835342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785835343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742375_1551 (size=12001) 2024-11-28T09:22:55,383 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/45527bbe9f2541cd868f8a6386285020 2024-11-28T09:22:55,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/10174d2fe7164d9c9c9d7efe37abc283 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283 2024-11-28T09:22:55,405 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T09:22:55,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/866691b9dbb545418783e6794dbd6970 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970 2024-11-28T09:22:55,412 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T09:22:55,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, 
pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/45527bbe9f2541cd868f8a6386285020 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020 2024-11-28T09:22:55,425 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020, entries=150, sequenceid=39, filesize=11.7 K 2024-11-28T09:22:55,426 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for f65103e784e3773002ae1a64e3eece97 in 572ms, sequenceid=39, compaction requested=false 2024-11-28T09:22:55,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:55,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,426 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-28T09:22:55,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-28T09:22:55,428 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-28T09:22:55,428 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0330 sec 2024-11-28T09:22:55,430 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 1.0380 sec 2024-11-28T09:22:55,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-28T09:22:55,500 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-28T09:22:55,501 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:55,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-28T09:22:55,502 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, 
table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:55,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:55,503 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:55,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:55,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:55,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:55,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:55,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:55,611 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0db4b629d5f24616ba7f2b48afb28f20 is 50, key is test_row_0/A:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:55,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742376_1552 (size=14341) 2024-11-28T09:22:55,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0db4b629d5f24616ba7f2b48afb28f20 2024-11-28T09:22:55,638 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4c4dc96d134549c1978c77bb8b249209 is 50, key is test_row_0/B:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:55,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742377_1553 (size=12001) 
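[Editorial sketch] The preceding entries show an admin-requested flush of TestAcidGuarantees: the client call reaches the master ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), the master stores a FlushTableProcedure (pid=132) with a FlushRegionProcedure subprocedure, and the region server runs a FlushRegionCallable while MemStoreFlusher writes the per-family .tmp HFiles. As a point of reference, the sketch below shows roughly how such a flush can be requested through the standard HBase client Admin API; only the table name is taken from the log, and the connection setup is generic boilerplate rather than anything recorded in this test run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        // Picks up hbase-site.xml (ZooKeeper quorum, etc.) from the classpath.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush all regions of the table. On the master this
            // shows up as FlushTableProcedure / FlushRegionProcedure work, matching
            // the pid=130/132/133 activity visible in the surrounding log.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}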
2024-11-28T09:22:55,668 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,668 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-28T09:22:55,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:55,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,669 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
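[Editorial sketch] The flush attempt for pid=133 above is rejected with "as already flushing", and while the in-flight flush drains the memstore, HRegion.checkResources keeps answering Mutate RPCs with RegionTooBusyException because the region is over its blocking memstore size (512.0 K in this run). In stock HBase that blocking size is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so this test is presumably configured far below the defaults to provoke back-pressure. Below is a rough client-side sketch of a writer that backs off when these rejections reach it; the table, row, family, and qualifier names mirror the log, while the retry loop, sleep values, and client settings are illustrative assumptions, not the test's configuration. Note that the stock client already retries RegionTooBusyException internally, so what surfaces here is typically a RetriesExhaustedException with the server's exception in its cause chain.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Keep the client's own retrying short so server back-pressure surfaces
        // quickly (illustrative values only).
        conf.setInt("hbase.client.retries.number", 3);
        conf.setLong("hbase.client.pause", 100);

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            // Family "A" and qualifier "col10" mirror the flushed cells in the log;
            // the value is an arbitrary placeholder.
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));

            long backoffMs = 200;  // illustrative starting backoff
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);   // may fail while the region is over its memstore limit
                    return;           // write accepted
                } catch (IOException e) {
                    // The region is still over its blocking memstore size; wait for the
                    // flush to drain it, then try again with exponential backoff.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
            throw new IOException("region stayed too busy after 10 attempts");
        }
    }
}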
2024-11-28T09:22:55,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785835687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785835691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,702 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785835692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,703 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785835694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785835694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785835796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:55,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785835804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785835804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785835809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,816 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:55,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785835809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,820 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-28T09:22:55,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:55,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,821 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,973 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:55,974 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-28T09:22:55,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:55,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:55,974 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,974 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:55,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:56,009 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785836004, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785836014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,030 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785836014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,031 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785836028, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,037 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785836031, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,068 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4c4dc96d134549c1978c77bb8b249209 2024-11-28T09:22:56,079 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/f0fd41e87a644c239476d25e896b54b7 is 50, key is test_row_0/C:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:56,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:56,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742378_1554 (size=12001) 2024-11-28T09:22:56,126 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/f0fd41e87a644c239476d25e896b54b7 2024-11-28T09:22:56,127 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-28T09:22:56,127 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:56,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:56,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:56,128 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:56,128 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:22:56,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
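Interleaved with the flush retries, the handlers at port 33819 keep rejecting client writes with RegionTooBusyException ("Over memstore limit=512.0 K") until the flush frees memstore space; these are retryable errors that the HBase client normally absorbs on its own. A minimal sketch of the same back-off-and-retry pattern made explicit on the client side, using the standard org.apache.hadoop.hbase.client API (the retry count and back-off values are assumptions for illustration; a real client would usually tune the built-in retry settings instead):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetry {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break;                               // write accepted
                    } catch (RegionTooBusyException e) {     // memstore over limit, back off
                        if (attempt >= 5) throw e;           // give up eventually
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;                      // exponential backoff
                    }
                }
            }
        }
    }
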
2024-11-28T09:22:56,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0db4b629d5f24616ba7f2b48afb28f20 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20 2024-11-28T09:22:56,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20, entries=200, sequenceid=51, filesize=14.0 K 2024-11-28T09:22:56,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4c4dc96d134549c1978c77bb8b249209 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209 2024-11-28T09:22:56,156 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209, entries=150, sequenceid=51, filesize=11.7 K 2024-11-28T09:22:56,157 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/f0fd41e87a644c239476d25e896b54b7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7 2024-11-28T09:22:56,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7, entries=150, sequenceid=51, filesize=11.7 K 2024-11-28T09:22:56,165 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f65103e784e3773002ae1a64e3eece97 in 567ms, sequenceid=51, compaction requested=true 2024-11-28T09:22:56,165 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:56,166 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:56,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:56,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:56,166 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:56,166 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38343 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:56,167 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:22:56,167 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:56,167 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=37.4 K 2024-11-28T09:22:56,167 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 068defc936dd4b1b8d51e134b7819ed5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785774379 2024-11-28T09:22:56,167 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:56,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:56,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10174d2fe7164d9c9c9d7efe37abc283, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785774413 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:22:56,168 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
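With the flush committed, both compaction threads pick up f65103e784e3773002ae1a64e3eece97: for store A the exploring policy selects all three eligible files (12001 + 12001 + 14341 = 38343 bytes) for a minor compaction, and store B follows with its own three files just below. A much-simplified sketch of the ratio test at the heart of HBase's size-based selection, dropping oversized old files until the oldest remaining one is within `ratio` of the combined size of the newer ones (an illustration of the idea only, not the actual ExploringCompactionPolicy code):

    import java.util.ArrayList;
    import java.util.List;

    public class RatioSelect {
        // Drop the oldest candidate while it is more than `ratio` times the combined
        // size of the newer candidates; compact what remains if enough files are left.
        static List<Long> select(List<Long> sizesOldestFirst, double ratio, int minFiles) {
            List<Long> candidates = new ArrayList<>(sizesOldestFirst);
            while (!candidates.isEmpty()) {
                long oldest = candidates.get(0);
                long newerSum = candidates.stream().skip(1).mapToLong(Long::longValue).sum();
                if (oldest <= ratio * newerSum) {
                    break;                    // ratio rule satisfied, keep this set
                }
                candidates.remove(0);         // oldest file is too large, skip it
            }
            return candidates.size() >= minFiles ? candidates : List.of();
        }

        public static void main(String[] args) {
            // Store A's three files from the log: 12001 + 12001 + 14341 = 38343 bytes.
            System.out.println(select(List.of(12001L, 12001L, 14341L), 1.2, 3));
        }
    }
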
2024-11-28T09:22:56,168 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.2 K 2024-11-28T09:22:56,168 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0db4b629d5f24616ba7f2b48afb28f20, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 619a8fc76e864554b47d6789c813d70c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785774379 2024-11-28T09:22:56,168 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 866691b9dbb545418783e6794dbd6970, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785774413 2024-11-28T09:22:56,169 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c4dc96d134549c1978c77bb8b249209, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:56,191 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:56,191 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4d30ded1c0c248048fee02b20f3dcc7a is 50, key is test_row_0/B:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:56,194 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:56,195 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/ada7d11180e04447b73abea8fd6b362e is 50, key is test_row_0/A:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:56,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742379_1555 (size=12104) 2024-11-28T09:22:56,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742380_1556 (size=12104) 2024-11-28T09:22:56,281 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,282 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-28T09:22:56,282 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:56,282 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:56,283 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f404c24957174f53a893e160c8d33dde is 50, key is test_row_0/A:col10/1732785775691/Put/seqid=0 2024-11-28T09:22:56,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
as already flushing 2024-11-28T09:22:56,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:56,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742381_1557 (size=12001) 2024-11-28T09:22:56,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785836344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785836344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,355 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785836348, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,360 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785836351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785836352, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785836454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785836457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,468 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785836457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,470 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785836461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,471 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785836462, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:56,646 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4d30ded1c0c248048fee02b20f3dcc7a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4d30ded1c0c248048fee02b20f3dcc7a 2024-11-28T09:22:56,653 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 4d30ded1c0c248048fee02b20f3dcc7a(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:56,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:56,653 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785776166; duration=0sec 2024-11-28T09:22:56,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:56,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:22:56,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:56,655 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:56,655 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:22:56,655 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:56,655 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.2 K 2024-11-28T09:22:56,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting be08734d29dd4c13bc0db6d287f9e402, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1732785774379 2024-11-28T09:22:56,660 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 45527bbe9f2541cd868f8a6386285020, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1732785774413 2024-11-28T09:22:56,661 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f0fd41e87a644c239476d25e896b54b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:56,672 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
f65103e784e3773002ae1a64e3eece97#C#compaction#471 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:56,673 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/3488c88d1b6d4517ae68536487332d95 is 50, key is test_row_0/C:col10/1732785775597/Put/seqid=0 2024-11-28T09:22:56,675 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/ada7d11180e04447b73abea8fd6b362e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ada7d11180e04447b73abea8fd6b362e 2024-11-28T09:22:56,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785836669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785836670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,680 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into ada7d11180e04447b73abea8fd6b362e(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:56,680 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:56,680 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,680 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785776165; duration=0sec 2024-11-28T09:22:56,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785836670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,680 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:56,680 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:22:56,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785836672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,683 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:56,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785836674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:56,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742382_1558 (size=12104) 2024-11-28T09:22:56,717 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/3488c88d1b6d4517ae68536487332d95 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/3488c88d1b6d4517ae68536487332d95 2024-11-28T09:22:56,723 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 3488c88d1b6d4517ae68536487332d95(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:56,723 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:56,723 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785776168; duration=0sec 2024-11-28T09:22:56,723 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:56,723 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:22:56,750 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f404c24957174f53a893e160c8d33dde 2024-11-28T09:22:56,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4eee4e40473c4aa8a5ee489671b6caf7 is 50, key is test_row_0/B:col10/1732785775691/Put/seqid=0 2024-11-28T09:22:56,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742383_1559 (size=12001) 2024-11-28T09:22:56,790 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4eee4e40473c4aa8a5ee489671b6caf7 2024-11-28T09:22:56,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6e20467852be44608fa81cacc44e485e is 50, key is test_row_0/C:col10/1732785775691/Put/seqid=0 2024-11-28T09:22:56,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742384_1560 (size=12001) 2024-11-28T09:22:56,848 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=75 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6e20467852be44608fa81cacc44e485e 2024-11-28T09:22:56,866 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f404c24957174f53a893e160c8d33dde as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde 2024-11-28T09:22:56,893 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:56,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4eee4e40473c4aa8a5ee489671b6caf7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7 2024-11-28T09:22:56,899 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:56,901 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6e20467852be44608fa81cacc44e485e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e 2024-11-28T09:22:56,906 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e, entries=150, sequenceid=75, filesize=11.7 K 2024-11-28T09:22:56,909 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for f65103e784e3773002ae1a64e3eece97 in 627ms, sequenceid=75, compaction requested=false 2024-11-28T09:22:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:22:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-28T09:22:56,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-28T09:22:56,911 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-28T09:22:56,912 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4070 sec 2024-11-28T09:22:56,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.4110 sec 2024-11-28T09:22:56,987 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:56,988 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:56,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:56,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a85ac5a39d8f45cf9f4c950c5d8a47fc is 50, key is test_row_0/A:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742385_1561 (size=14341) 2024-11-28T09:22:57,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785837041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785837042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,053 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785837042, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785837044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,054 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785837045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785837152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,158 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785837153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785837154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785837155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,165 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785837155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,366 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785837359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785837361, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785837366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,369 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785837366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785837366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a85ac5a39d8f45cf9f4c950c5d8a47fc 2024-11-28T09:22:57,473 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4a693697bbdb45bc905aabd9bacc46aa is 50, key is test_row_0/B:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742386_1562 (size=12001) 2024-11-28T09:22:57,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4a693697bbdb45bc905aabd9bacc46aa 2024-11-28T09:22:57,539 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b0759490089d4821b0ccf8485a66a9e8 is 50, key is test_row_0/C:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742387_1563 (size=12001) 2024-11-28T09:22:57,576 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=91 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b0759490089d4821b0ccf8485a66a9e8 2024-11-28T09:22:57,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a85ac5a39d8f45cf9f4c950c5d8a47fc as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc 2024-11-28T09:22:57,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc, entries=200, sequenceid=91, filesize=14.0 K 2024-11-28T09:22:57,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4a693697bbdb45bc905aabd9bacc46aa as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa 2024-11-28T09:22:57,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T09:22:57,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b0759490089d4821b0ccf8485a66a9e8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8 2024-11-28T09:22:57,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8, entries=150, sequenceid=91, filesize=11.7 K 2024-11-28T09:22:57,605 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for f65103e784e3773002ae1a64e3eece97 in 618ms, sequenceid=91, compaction requested=true 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:57,605 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:57,605 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:57,605 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:57,606 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:57,606 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:22:57,606 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:57,606 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4d30ded1c0c248048fee02b20f3dcc7a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.3 K 2024-11-28T09:22:57,607 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:57,607 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:22:57,607 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:22:57,607 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ada7d11180e04447b73abea8fd6b362e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=37.5 K 2024-11-28T09:22:57,607 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ada7d11180e04447b73abea8fd6b362e, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:57,607 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d30ded1c0c248048fee02b20f3dcc7a, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:57,608 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f404c24957174f53a893e160c8d33dde, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785775681 2024-11-28T09:22:57,608 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4eee4e40473c4aa8a5ee489671b6caf7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785775681 2024-11-28T09:22:57,608 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting a85ac5a39d8f45cf9f4c950c5d8a47fc, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:57,608 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a693697bbdb45bc905aabd9bacc46aa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:57,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-28T09:22:57,611 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-28T09:22:57,613 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-28T09:22:57,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:57,615 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:57,618 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:57,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:57,629 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#477 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:57,630 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/70790720243d4bbb8b6a7e87d231b1af is 50, key is test_row_0/A:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,642 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#478 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:57,642 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/5c207240939240adb56f1dd757231951 is 50, key is test_row_0/B:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:57,677 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:57,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:57,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:57,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:57,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:57,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:57,680 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:57,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742388_1564 (size=12207) 2024-11-28T09:22:57,694 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/70790720243d4bbb8b6a7e87d231b1af as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/70790720243d4bbb8b6a7e87d231b1af 2024-11-28T09:22:57,708 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785837697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785837698, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785837699, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785837702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,710 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785837703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,713 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 70790720243d4bbb8b6a7e87d231b1af(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:57,713 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:57,713 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785777605; duration=0sec 2024-11-28T09:22:57,714 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:57,714 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:22:57,714 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:57,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:57,717 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:57,717 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:22:57,717 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:22:57,717 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/3488c88d1b6d4517ae68536487332d95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.3 K 2024-11-28T09:22:57,718 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3488c88d1b6d4517ae68536487332d95, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1732785775029 2024-11-28T09:22:57,718 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e20467852be44608fa81cacc44e485e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=75, earliestPutTs=1732785775681 2024-11-28T09:22:57,719 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0759490089d4821b0ccf8485a66a9e8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:57,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742389_1565 (size=12207) 2024-11-28T09:22:57,724 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/064891f3bf07452abf91c75e3da6fbc5 is 50, key is test_row_0/A:col10/1732785777675/Put/seqid=0 2024-11-28T09:22:57,731 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#480 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:57,732 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/62de3eaad347455daf304ff033f206f9 is 50, key is test_row_0/C:col10/1732785776350/Put/seqid=0 2024-11-28T09:22:57,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742390_1566 (size=16681) 2024-11-28T09:22:57,756 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/064891f3bf07452abf91c75e3da6fbc5 2024-11-28T09:22:57,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742391_1567 (size=12207) 2024-11-28T09:22:57,766 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T09:22:57,770 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,770 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:57,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:57,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:57,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:57,771 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:57,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:57,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:57,772 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/44161acacce2469ba16967b5cd758938 is 50, key is test_row_0/B:col10/1732785777675/Put/seqid=0 2024-11-28T09:22:57,777 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/62de3eaad347455daf304ff033f206f9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/62de3eaad347455daf304ff033f206f9 2024-11-28T09:22:57,782 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 62de3eaad347455daf304ff033f206f9(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:22:57,782 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:57,782 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785777605; duration=0sec 2024-11-28T09:22:57,782 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:57,782 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:22:57,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742392_1568 (size=12001) 2024-11-28T09:22:57,809 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/44161acacce2469ba16967b5cd758938 2024-11-28T09:22:57,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785837809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785837810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785837811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,814 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785837811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,817 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:57,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785837814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,822 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/491da61e21324cde8f2512eb67bb3881 is 50, key is test_row_0/C:col10/1732785777675/Put/seqid=0 2024-11-28T09:22:57,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742393_1569 (size=12001) 2024-11-28T09:22:57,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:57,926 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:57,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:57,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:57,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:57,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:57,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:57,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:57,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,020 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785838015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,021 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785838016, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,022 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785838017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785838017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785838018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,080 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:58,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:58,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,081 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,081 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,127 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/5c207240939240adb56f1dd757231951 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5c207240939240adb56f1dd757231951 2024-11-28T09:22:58,133 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 5c207240939240adb56f1dd757231951(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
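[editor's note] The records above trace a full minor-compaction cycle for stores B and C: HStore selects three HFiles, the PressureAwareThroughputController reports the (unlimited) throughput budget, the result is written under .tmp, committed into the store directory, and CompactSplit clears the under-compaction mark. As a hedged, illustrative aside (not part of this test), a caller can request and watch the same machinery through the public Admin API; the table name and polling interval below are assumptions.

```java
// Illustrative sketch only: request a compaction and poll its state via the Admin API.
// Assumes the cluster configuration on the classpath and the table name "TestAcidGuarantees".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees"); // assumed table name
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table); // asynchronous request; the region server queues it like the compactions logged above
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(1000);      // poll until the compaction queues drain
      }
    }
  }
}
```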
2024-11-28T09:22:58,133 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:58,133 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785777605; duration=0sec 2024-11-28T09:22:58,133 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:58,133 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:22:58,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:58,233 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,234 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:58,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:58,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,234 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
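[editor's note] The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server rejecting writes while its memstore is over the blocking limit and a flush is in flight. The stock HBase client retries these internally, so the sketch below is only a hedged illustration of the same back-off idea in caller code; the table, family, and qualifier names are assumptions taken from the test rows seen in this log.

```java
// Illustrative sketch only: retry a Put with exponential backoff when the region is too busy.
// In practice the synchronous client performs comparable retries on the caller's behalf.
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {
  static void putWithBackoff(Connection conn, byte[] row, byte[] value) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) { // assumed name
      long sleepMs = 100;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          Put p = new Put(row);
          p.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value); // assumed family/qualifier
          table.put(p);
          return;                                  // accepted once the memstore drains below the limit
        } catch (RegionTooBusyException busy) {
          Thread.sleep(sleepMs);                   // region over its memstore blocking limit; wait and retry
          sleepMs = Math.min(sleepMs * 2, 5_000);  // exponential backoff, capped
        }
      }
      throw new RuntimeException("region stayed too busy after retries");
    }
  }
}
```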
2024-11-28T09:22:58,234 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,258 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=117 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/491da61e21324cde8f2512eb67bb3881 2024-11-28T09:22:58,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/064891f3bf07452abf91c75e3da6fbc5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5 2024-11-28T09:22:58,269 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5, entries=250, sequenceid=117, filesize=16.3 K 2024-11-28T09:22:58,270 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/44161acacce2469ba16967b5cd758938 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938 2024-11-28T09:22:58,300 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938, entries=150, sequenceid=117, filesize=11.7 K 2024-11-28T09:22:58,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/491da61e21324cde8f2512eb67bb3881 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881 2024-11-28T09:22:58,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881, entries=150, sequenceid=117, filesize=11.7 K 2024-11-28T09:22:58,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f65103e784e3773002ae1a64e3eece97 in 645ms, sequenceid=117, compaction requested=false 2024-11-28T09:22:58,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:58,333 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, 
dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:22:58,333 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:58,334 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:58,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/89fbe02bdc324e35816858d25367aa7b is 50, key is test_row_0/A:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742394_1570 (size=14391) 2024-11-28T09:22:58,383 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/89fbe02bdc324e35816858d25367aa7b 2024-11-28T09:22:58,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,389 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
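[editor's note] The records above show the flusher starting a new flush of all three column families (each CompactingMemStore swaps its pipeline segment before writing), while the master keeps re-dispatching the FlushRegionCallable for pid=135, which the region server declines with "NOT flushing ... as already flushing". As a hedged illustration of where that procedure originates, the sketch below issues the corresponding client-side flush request; the table name is an assumption.

```java
// Illustrative sketch only: the Admin.flush call that ultimately drives the master's
// flush procedure (pid=134 here) and its per-region FlushRegionCallable (pid=135).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to flush the table's regions; if a region is already flushing,
      // the per-region callable fails and is re-dispatched, as seen in this log.
      admin.flush(TableName.valueOf("TestAcidGuarantees")); // assumed table name
    }
  }
}
```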
2024-11-28T09:22:58,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:58,396 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9869ed8131bd43e49c3df39eb139fe71 is 50, key is test_row_0/B:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742395_1571 (size=12051) 2024-11-28T09:22:58,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785838406, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,429 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785838407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,431 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9869ed8131bd43e49c3df39eb139fe71 2024-11-28T09:22:58,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785838428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785838429, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,445 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785838431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,450 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/20f442b7dd704a00a2c152752153b702 is 50, key is test_row_0/C:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742396_1572 (size=12051) 2024-11-28T09:22:58,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=131 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/20f442b7dd704a00a2c152752153b702 2024-11-28T09:22:58,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/89fbe02bdc324e35816858d25367aa7b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b 2024-11-28T09:22:58,509 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b, entries=200, sequenceid=131, filesize=14.1 K 2024-11-28T09:22:58,510 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9869ed8131bd43e49c3df39eb139fe71 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71 2024-11-28T09:22:58,528 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71, entries=150, sequenceid=131, filesize=11.8 K 2024-11-28T09:22:58,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): 
Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/20f442b7dd704a00a2c152752153b702 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702 2024-11-28T09:22:58,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702, entries=150, sequenceid=131, filesize=11.8 K 2024-11-28T09:22:58,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f65103e784e3773002ae1a64e3eece97 in 201ms, sequenceid=131, compaction requested=true 2024-11-28T09:22:58,534 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:58,535 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:22:58,535 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:22:58,535 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:58,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:58,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:22:58,539 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
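The repeated "Over memstore limit=512.0 K" warnings and the flush just completed above both trace back to the same per-region memstore thresholds. As a minimal sketch (not taken from this test's configuration), the blocking limit reported by HRegion.checkResources is the flush size multiplied by the block multiplier; the values below are assumptions chosen to reproduce a 512 KB threshold like the one in the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: how the 512 KB blocking threshold seen in the warnings above
// could arise. Both property values here are assumptions, not the test's actual config.
public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 KB flush trigger
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // writes block at 4x the flush size

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes blocked above ~" + blockingLimit + " bytes per region"); // ~524288
    }
}
```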
2024-11-28T09:22:58,539 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5c207240939240adb56f1dd757231951, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.4 K 2024-11-28T09:22:58,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43279 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:58,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:22:58,539 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,539 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/70790720243d4bbb8b6a7e87d231b1af, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=42.3 K 2024-11-28T09:22:58,541 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5c207240939240adb56f1dd757231951, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:58,542 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,543 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 44161acacce2469ba16967b5cd758938, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785777041 2024-11-28T09:22:58,545 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-28T09:22:58,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,546 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:22:58,549 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 70790720243d4bbb8b6a7e87d231b1af, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:58,549 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9869ed8131bd43e49c3df39eb139fe71, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:22:58,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:58,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:58,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:58,550 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 064891f3bf07452abf91c75e3da6fbc5, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785777040 2024-11-28T09:22:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:58,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:58,555 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89fbe02bdc324e35816858d25367aa7b, keycount=200, bloomtype=ROW, size=14.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:22:58,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/8ad8c9fb6ff44320ab700476118e9b17 is 50, key is test_row_0/A:col10/1732785778545/Put/seqid=0 2024-11-28T09:22:58,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785838563, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,578 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785838564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785838570, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,582 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#487 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:58,583 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/a3b416310e59405587cb26e6b61f4375 is 50, key is test_row_0/B:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,589 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785838580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,590 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785838580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,591 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#488 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:58,593 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/3588334326a74af38aa07b85af223fed is 50, key is test_row_0/A:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742397_1573 (size=14541) 2024-11-28T09:22:58,633 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/8ad8c9fb6ff44320ab700476118e9b17 2024-11-28T09:22:58,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785838680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785838680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,686 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785838681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742398_1574 (size=12359) 2024-11-28T09:22:58,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785838691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,709 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785838693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742399_1575 (size=12359) 2024-11-28T09:22:58,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9bf94fe2f6924e17977677f76cf499c4 is 50, key is test_row_0/B:col10/1732785778545/Put/seqid=0 2024-11-28T09:22:58,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:58,724 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/3588334326a74af38aa07b85af223fed as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/3588334326a74af38aa07b85af223fed 2024-11-28T09:22:58,732 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 3588334326a74af38aa07b85af223fed(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
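The "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" entries above come from the store-file selection step. The following is a simplified, illustrative sketch of the size-ratio test such a policy applies; it is not the actual ExploringCompactionPolicy code, and the sizes and ratio are assumptions roughly matching the ~12-16 K flush files in this log.

```java
import java.util.List;

// Simplified sketch of a size-ratio check: a candidate set is acceptable when no single
// file is larger than ratio times the combined size of the others, which is why three
// similarly sized flush files end up compacted together as in the log above.
public class CompactionRatioSketch {
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // one file dominates this permutation; skip it
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of the A-store files selected above (~11.9 K, 16.3 K, 14.1 K).
        System.out.println(filesInRatio(List.of(12_184L, 16_692L, 14_403L), 1.2)); // true
    }
}
```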
2024-11-28T09:22:58,732 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:58,732 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785778535; duration=0sec 2024-11-28T09:22:58,733 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:22:58,733 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:22:58,733 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:22:58,734 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36259 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:22:58,734 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:22:58,734 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:58,734 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/62de3eaad347455daf304ff033f206f9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.4 K 2024-11-28T09:22:58,734 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62de3eaad347455daf304ff033f206f9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=91, earliestPutTs=1732785776337 2024-11-28T09:22:58,735 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 491da61e21324cde8f2512eb67bb3881, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=117, earliestPutTs=1732785777041 2024-11-28T09:22:58,735 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 20f442b7dd704a00a2c152752153b702, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:22:58,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42869 is added to blk_1073742400_1576 (size=12151) 2024-11-28T09:22:58,753 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9bf94fe2f6924e17977677f76cf499c4 2024-11-28T09:22:58,759 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#490 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:22:58,761 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/58ba4481c9b3429780c3db284fc056dd is 50, key is test_row_0/C:col10/1732785777702/Put/seqid=0 2024-11-28T09:22:58,771 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0bb569162e514fda9d927c9d9cb40e04 is 50, key is test_row_0/C:col10/1732785778545/Put/seqid=0 2024-11-28T09:22:58,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742401_1577 (size=12359) 2024-11-28T09:22:58,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742402_1578 (size=12151) 2024-11-28T09:22:58,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785838892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785838893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,896 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785838893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785838901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,923 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:58,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785838916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:58,985 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T09:22:59,107 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/a3b416310e59405587cb26e6b61f4375 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/a3b416310e59405587cb26e6b61f4375 2024-11-28T09:22:59,123 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into a3b416310e59405587cb26e6b61f4375(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:59,123 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:59,123 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785778535; duration=0sec 2024-11-28T09:22:59,123 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:59,123 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:22:59,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785839199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785839199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785839205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,214 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785839209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,231 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785839225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,242 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/58ba4481c9b3429780c3db284fc056dd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/58ba4481c9b3429780c3db284fc056dd 2024-11-28T09:22:59,246 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0bb569162e514fda9d927c9d9cb40e04 2024-11-28T09:22:59,247 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 58ba4481c9b3429780c3db284fc056dd(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:22:59,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:59,247 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785778535; duration=0sec 2024-11-28T09:22:59,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:22:59,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:22:59,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/8ad8c9fb6ff44320ab700476118e9b17 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17 2024-11-28T09:22:59,259 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17, entries=200, sequenceid=157, filesize=14.2 K 2024-11-28T09:22:59,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/9bf94fe2f6924e17977677f76cf499c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4 2024-11-28T09:22:59,264 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4, entries=150, sequenceid=157, filesize=11.9 K 2024-11-28T09:22:59,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0bb569162e514fda9d927c9d9cb40e04 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04 2024-11-28T09:22:59,270 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04, entries=150, sequenceid=157, filesize=11.9 K 2024-11-28T09:22:59,271 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for f65103e784e3773002ae1a64e3eece97 in 725ms, sequenceid=157, compaction requested=false 2024-11-28T09:22:59,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:22:59,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:59,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135 2024-11-28T09:22:59,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=135 2024-11-28T09:22:59,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134 2024-11-28T09:22:59,274 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6540 sec 2024-11-28T09:22:59,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.6620 sec 2024-11-28T09:22:59,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:22:59,714 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:22:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:22:59,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/9edff87bdd6c49d983debef6ab6daf5b is 50, key is test_row_0/A:col10/1732785778560/Put/seqid=0 
2024-11-28T09:22:59,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-28T09:22:59,722 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-28T09:22:59,724 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:22:59,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-28T09:22:59,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:22:59,725 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:22:59,726 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:22:59,726 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:22:59,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742403_1579 (size=16931) 2024-11-28T09:22:59,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785839759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785839762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,782 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785839766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785839766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785839766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:22:59,878 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,878 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-28T09:22:59,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:59,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:22:59,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:22:59,879 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:59,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:59,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:22:59,882 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785839873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,883 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785839873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785839884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,895 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785839884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:22:59,895 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:22:59,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785839886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:23:00,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-28T09:23:00,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:00,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785840085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785840086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785840096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785840096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785840098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/9edff87bdd6c49d983debef6ab6daf5b 2024-11-28T09:23:00,181 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/af92c76e188a47f8abf88b93a7259e7d is 50, key is test_row_0/B:col10/1732785778560/Put/seqid=0 2024-11-28T09:23:00,184 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,185 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-28T09:23:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
as already flushing 2024-11-28T09:23:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,185 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,185 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:00,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742404_1580 (size=12151) 2024-11-28T09:23:00,226 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/af92c76e188a47f8abf88b93a7259e7d 2024-11-28T09:23:00,245 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/a3ec56771eea4ff89cbe2d51b3a2ee54 is 50, key is test_row_0/C:col10/1732785778560/Put/seqid=0 2024-11-28T09:23:00,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742405_1581 (size=12151) 2024-11-28T09:23:00,286 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/a3ec56771eea4ff89cbe2d51b3a2ee54 2024-11-28T09:23:00,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/9edff87bdd6c49d983debef6ab6daf5b as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b 2024-11-28T09:23:00,313 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b, entries=250, sequenceid=173, filesize=16.5 K 2024-11-28T09:23:00,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/af92c76e188a47f8abf88b93a7259e7d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d 2024-11-28T09:23:00,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d, entries=150, sequenceid=173, filesize=11.9 K 2024-11-28T09:23:00,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/a3ec56771eea4ff89cbe2d51b3a2ee54 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54 2024-11-28T09:23:00,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:23:00,328 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54, entries=150, sequenceid=173, filesize=11.9 K 2024-11-28T09:23:00,329 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f65103e784e3773002ae1a64e3eece97 in 616ms, sequenceid=173, compaction requested=true 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), 
splitQueue=0 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:00,329 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-28T09:23:00,330 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:00,330 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:00,331 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:00,331 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:00,331 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,332 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/58ba4481c9b3429780c3db284fc056dd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.8 K 2024-11-28T09:23:00,333 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 43831 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:00,333 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:00,333 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:00,333 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/3588334326a74af38aa07b85af223fed, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=42.8 K 2024-11-28T09:23:00,334 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 58ba4481c9b3429780c3db284fc056dd, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:23:00,334 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3588334326a74af38aa07b85af223fed, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:23:00,334 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bb569162e514fda9d927c9d9cb40e04, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732785778405 2024-11-28T09:23:00,335 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8ad8c9fb6ff44320ab700476118e9b17, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732785778405 2024-11-28T09:23:00,335 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a3ec56771eea4ff89cbe2d51b3a2ee54, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:00,336 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9edff87bdd6c49d983debef6ab6daf5b, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:00,339 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,339 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-28T09:23:00,339 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:00,339 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:00,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:00,352 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/329b7c99765548ddb6ba6003a4f47acf is 50, key is test_row_0/A:col10/1732785779765/Put/seqid=0 2024-11-28T09:23:00,367 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#496 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:00,367 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/df2d3afe7d7f40f9a241275202186ff7 is 50, key is test_row_0/C:col10/1732785778560/Put/seqid=0 2024-11-28T09:23:00,376 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#497 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:00,376 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/ea9118de701e4b508fa1a2357604a945 is 50, key is test_row_0/A:col10/1732785778560/Put/seqid=0 2024-11-28T09:23:00,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
as already flushing 2024-11-28T09:23:00,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:00,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742406_1582 (size=12151) 2024-11-28T09:23:00,427 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/329b7c99765548ddb6ba6003a4f47acf 2024-11-28T09:23:00,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785840428, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785840435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785840435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785840435, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785840436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742407_1583 (size=12561) 2024-11-28T09:23:00,463 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/df2d3afe7d7f40f9a241275202186ff7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/df2d3afe7d7f40f9a241275202186ff7 2024-11-28T09:23:00,467 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into df2d3afe7d7f40f9a241275202186ff7(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:00,467 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:00,468 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785780329; duration=0sec 2024-11-28T09:23:00,468 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:00,468 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:00,468 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:00,469 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36661 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:00,469 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:00,469 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:00,469 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/a3b416310e59405587cb26e6b61f4375, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=35.8 K 2024-11-28T09:23:00,469 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a3b416310e59405587cb26e6b61f4375, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=131, earliestPutTs=1732785777696 2024-11-28T09:23:00,470 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9bf94fe2f6924e17977677f76cf499c4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732785778405 2024-11-28T09:23:00,470 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting af92c76e188a47f8abf88b93a7259e7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:00,472 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4151250f8f0c4de39442f51fd8e91246 is 50, key is test_row_0/B:col10/1732785779765/Put/seqid=0 2024-11-28T09:23:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742408_1584 (size=12561) 2024-11-28T09:23:00,501 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/ea9118de701e4b508fa1a2357604a945 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ea9118de701e4b508fa1a2357604a945 2024-11-28T09:23:00,506 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#499 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:00,506 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/67162c5798dd4423b33cf41d7708d8a4 is 50, key is test_row_0/B:col10/1732785778560/Put/seqid=0 2024-11-28T09:23:00,509 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into ea9118de701e4b508fa1a2357604a945(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:00,509 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:00,509 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785780329; duration=0sec 2024-11-28T09:23:00,509 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:00,509 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:00,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742409_1585 (size=12151) 2024-11-28T09:23:00,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785840547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785840557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785840557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,563 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785840558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742410_1586 (size=12561) 2024-11-28T09:23:00,573 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785840560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,770 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785840763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785840764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785840764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785840765, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,780 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:00,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785840776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:00,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:23:00,935 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4151250f8f0c4de39442f51fd8e91246 2024-11-28T09:23:00,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/930121b17e0d44abafaa70883f3cc793 is 50, key is test_row_0/C:col10/1732785779765/Put/seqid=0 2024-11-28T09:23:00,980 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/67162c5798dd4423b33cf41d7708d8a4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/67162c5798dd4423b33cf41d7708d8a4 2024-11-28T09:23:00,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742411_1587 (size=12151) 2024-11-28T09:23:00,987 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 67162c5798dd4423b33cf41d7708d8a4(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:00,987 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:00,987 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785780329; duration=0sec 2024-11-28T09:23:00,987 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:00,987 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:00,987 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=196 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/930121b17e0d44abafaa70883f3cc793 2024-11-28T09:23:00,998 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/329b7c99765548ddb6ba6003a4f47acf as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf 2024-11-28T09:23:01,007 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf, entries=150, sequenceid=196, filesize=11.9 K 2024-11-28T09:23:01,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4151250f8f0c4de39442f51fd8e91246 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246 2024-11-28T09:23:01,015 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246, entries=150, sequenceid=196, filesize=11.9 K 2024-11-28T09:23:01,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/930121b17e0d44abafaa70883f3cc793 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793 2024-11-28T09:23:01,025 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793, entries=150, sequenceid=196, filesize=11.9 K 2024-11-28T09:23:01,025 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for f65103e784e3773002ae1a64e3eece97 in 686ms, sequenceid=196, compaction requested=false 2024-11-28T09:23:01,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:01,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:01,026 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-28T09:23:01,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-28T09:23:01,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-28T09:23:01,028 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3010 sec 2024-11-28T09:23:01,030 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.3050 sec 2024-11-28T09:23:01,081 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:23:01,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:01,081 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,087 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/04237a67b4aa477a84b4425801ff019c is 50, key is test_row_0/A:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:01,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742412_1588 (size=14541) 2024-11-28T09:23:01,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785841143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785841148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,159 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785841151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,166 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785841152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,167 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785841153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785841253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,264 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785841254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,268 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785841262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,275 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785841267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785841270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785841466, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785841469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785841472, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,489 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785841476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,495 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785841478, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/04237a67b4aa477a84b4425801ff019c 2024-11-28T09:23:01,515 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/990e4ec55ef44277b7fe43657529fc7d is 50, key is test_row_0/B:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:01,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742413_1589 (size=12151) 2024-11-28T09:23:01,583 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/990e4ec55ef44277b7fe43657529fc7d 2024-11-28T09:23:01,605 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7a8b0d062d344a749111256b823da0eb is 50, key is test_row_0/C:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:01,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742414_1590 (size=12151) 2024-11-28T09:23:01,638 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7a8b0d062d344a749111256b823da0eb 2024-11-28T09:23:01,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/04237a67b4aa477a84b4425801ff019c as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c 2024-11-28T09:23:01,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c, entries=200, sequenceid=215, filesize=14.2 K 2024-11-28T09:23:01,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/990e4ec55ef44277b7fe43657529fc7d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d 2024-11-28T09:23:01,671 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d, entries=150, sequenceid=215, filesize=11.9 K 2024-11-28T09:23:01,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7a8b0d062d344a749111256b823da0eb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb 2024-11-28T09:23:01,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb, entries=150, sequenceid=215, filesize=11.9 K 2024-11-28T09:23:01,678 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for f65103e784e3773002ae1a64e3eece97 in 597ms, sequenceid=215, compaction requested=true 2024-11-28T09:23:01,678 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:01,678 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:01,679 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-28T09:23:01,679 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39253 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:01,679 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:01,679 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:01,679 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:01,679 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ea9118de701e4b508fa1a2357604a945, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=38.3 K 2024-11-28T09:23:01,680 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:01,680 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:01,680 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:01,680 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/67162c5798dd4423b33cf41d7708d8a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.0 K 2024-11-28T09:23:01,680 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea9118de701e4b508fa1a2357604a945, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:01,680 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 67162c5798dd4423b33cf41d7708d8a4, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:01,681 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 329b7c99765548ddb6ba6003a4f47acf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732785779761 2024-11-28T09:23:01,681 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 04237a67b4aa477a84b4425801ff019c, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:01,681 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4151250f8f0c4de39442f51fd8e91246, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732785779761 2024-11-28T09:23:01,681 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 990e4ec55ef44277b7fe43657529fc7d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:01,701 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#504 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:01,702 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/b81390e8b207424fa25563d1ede778dc is 50, key is test_row_0/A:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:01,708 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#505 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:01,708 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fbf4858f89d84e2dbcf641c4ec3f9d43 is 50, key is test_row_0/B:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:01,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742415_1591 (size=12663) 2024-11-28T09:23:01,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742416_1592 (size=12663) 2024-11-28T09:23:01,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:01,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:01,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:01,801 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0bc6485396004155a04fe85a54d5b2b9 is 50, key is test_row_0/A:col10/1732785781150/Put/seqid=0 2024-11-28T09:23:01,815 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785841801, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,824 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785841804, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,825 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785841805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,825 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785841809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,826 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785841810, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-28T09:23:01,830 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-28T09:23:01,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:01,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-28T09:23:01,833 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:01,834 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:01,834 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:01,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T09:23:01,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is 
added to blk_1073742417_1593 (size=16931) 2024-11-28T09:23:01,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785841916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,928 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785841926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,931 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785841926, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785841927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,933 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:01,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785841928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T09:23:01,987 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:01,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T09:23:01,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:01,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:01,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:01,988 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
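The RegionTooBusyException entries above show the server rejecting Mutate calls because the region's memstore has grown past its blocking limit (512.0 K in this run) while a flush is still in flight. Below is a minimal client-side sketch of coping with that, assuming the standard HBase 2.x client API; only the table, family, row and qualifier names (TestAcidGuarantees, A, test_row_0, col10) are taken from this log, the retry count and backoff values are illustrative, and in practice the stock client already retries this exception internally before surfacing a failure.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same row/column shape the test writes: row "test_row_0", family "A", qualifier "col10".
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100L; // illustrative starting backoff, not a value from this run
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put); // the stock client retries this exception on its own as well
              break;
            } catch (RegionTooBusyException overLimit) {
              // The region rejected the write because its memstore is above the blocking size;
              // back off and let the in-flight flush drain it before trying again.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }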
2024-11-28T09:23:01,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:01,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,126 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785842123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,132 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/b81390e8b207424fa25563d1ede778dc as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b81390e8b207424fa25563d1ede778dc 2024-11-28T09:23:02,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T09:23:02,138 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into b81390e8b207424fa25563d1ede778dc(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
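The entries above show the shortCompactions thread committing the compacted A-family file b81390e8b207424fa25563d1ede778dc while the master is still polling FlushTableProcedure pid=138, which was started from the client-side "flush TestAcidGuarantees" request. A minimal sketch, assuming the standard HBase Admin API, of how such a flush and a follow-up major compaction can be requested; only the table name comes from this log, and the compactions seen here were selected automatically by the region server rather than by an explicit call like this.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(table);        // drives a table flush, much like procedure pid=138 above
          admin.majorCompact(table); // asks for a full rewrite of each store's files
        }
      }
    }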
2024-11-28T09:23:02,139 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:02,139 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785781678; duration=0sec 2024-11-28T09:23:02,139 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:02,139 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:02,139 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:02,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785842130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,140 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785842134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,140 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36863 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:02,140 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:02,140 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:02,140 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/df2d3afe7d7f40f9a241275202186ff7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.0 K 2024-11-28T09:23:02,141 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting df2d3afe7d7f40f9a241275202186ff7, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732785778556 2024-11-28T09:23:02,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,141 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 930121b17e0d44abafaa70883f3cc793, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=196, earliestPutTs=1732785779761 2024-11-28T09:23:02,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T09:23:02,142 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7a8b0d062d344a749111256b823da0eb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:02,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:02,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785842136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785842136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,166 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#507 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:02,166 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b255cf01c29d44b7a705911ced76e1b5 is 50, key is test_row_0/C:col10/1732785781080/Put/seqid=0 2024-11-28T09:23:02,186 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fbf4858f89d84e2dbcf641c4ec3f9d43 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fbf4858f89d84e2dbcf641c4ec3f9d43 2024-11-28T09:23:02,201 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into fbf4858f89d84e2dbcf641c4ec3f9d43(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:02,201 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:02,201 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785781679; duration=0sec 2024-11-28T09:23:02,201 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:02,201 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:02,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742418_1594 (size=12663) 2024-11-28T09:23:02,207 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b255cf01c29d44b7a705911ced76e1b5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b255cf01c29d44b7a705911ced76e1b5 2024-11-28T09:23:02,218 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into b255cf01c29d44b7a705911ced76e1b5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
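A quick size check on the compactions recorded above: each store's three inputs add up to 12.3 K + 11.9 K + 11.9 K ≈ 36.0 K, matching the reported totalSize, yet each compacted output (b81390e8b207424fa25563d1ede778dc, fbf4858f89d84e2dbcf641c4ec3f9d43, b255cf01c29d44b7a705911ced76e1b5) is only 12.4 K (12,663 bytes). That is consistent with the three input HFiles per store covering the same 150 test rows at successive sequence ids (173, 196, 215), so the rewrite keeps roughly one cell version per row rather than all three.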
2024-11-28T09:23:02,218 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:02,219 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785781679; duration=0sec 2024-11-28T09:23:02,219 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:02,219 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:02,246 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0bc6485396004155a04fe85a54d5b2b9 2024-11-28T09:23:02,265 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/1389b470ec224e2990d9caeead4edbe7 is 50, key is test_row_0/B:col10/1732785781150/Put/seqid=0 2024-11-28T09:23:02,294 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T09:23:02,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:02,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,295 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139
java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742419_1595 (size=12151)
2024-11-28T09:23:02,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:23:02,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785842431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637
2024-11-28T09:23:02,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138
2024-11-28T09:23:02,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-28T09:23:02,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785842441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637
2024-11-28T09:23:02,447 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:23:02,448 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139
2024-11-28T09:23:02,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.
2024-11-28T09:23:02,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing
2024-11-28T09:23:02,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.
2024-11-28T09:23:02,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139
java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785842443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785842450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,456 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:02,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785842451, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,597 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ada7d11180e04447b73abea8fd6b362e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/70790720243d4bbb8b6a7e87d231b1af, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/3588334326a74af38aa07b85af223fed, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ea9118de701e4b508fa1a2357604a945, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c] to archive 2024-11-28T09:23:02,599 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:23:02,600 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T09:23:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,601 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/068defc936dd4b1b8d51e134b7819ed5 2024-11-28T09:23:02,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:02,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,606 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/10174d2fe7164d9c9c9d7efe37abc283 2024-11-28T09:23:02,609 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0db4b629d5f24616ba7f2b48afb28f20 2024-11-28T09:23:02,629 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ada7d11180e04447b73abea8fd6b362e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ada7d11180e04447b73abea8fd6b362e 2024-11-28T09:23:02,633 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f404c24957174f53a893e160c8d33dde 2024-11-28T09:23:02,636 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a85ac5a39d8f45cf9f4c950c5d8a47fc 2024-11-28T09:23:02,637 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/70790720243d4bbb8b6a7e87d231b1af to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/70790720243d4bbb8b6a7e87d231b1af 2024-11-28T09:23:02,639 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/064891f3bf07452abf91c75e3da6fbc5 2024-11-28T09:23:02,640 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/89fbe02bdc324e35816858d25367aa7b 2024-11-28T09:23:02,641 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/3588334326a74af38aa07b85af223fed to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/3588334326a74af38aa07b85af223fed 2024-11-28T09:23:02,643 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/8ad8c9fb6ff44320ab700476118e9b17 2024-11-28T09:23:02,644 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/9edff87bdd6c49d983debef6ab6daf5b 2024-11-28T09:23:02,653 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ea9118de701e4b508fa1a2357604a945 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/ea9118de701e4b508fa1a2357604a945 2024-11-28T09:23:02,655 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/329b7c99765548ddb6ba6003a4f47acf 2024-11-28T09:23:02,656 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/04237a67b4aa477a84b4425801ff019c 2024-11-28T09:23:02,662 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4d30ded1c0c248048fee02b20f3dcc7a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5c207240939240adb56f1dd757231951, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/a3b416310e59405587cb26e6b61f4375, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/67162c5798dd4423b33cf41d7708d8a4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d] to archive 2024-11-28T09:23:02,663 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-28T09:23:02,668 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/619a8fc76e864554b47d6789c813d70c 2024-11-28T09:23:02,670 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/866691b9dbb545418783e6794dbd6970 2024-11-28T09:23:02,671 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4d30ded1c0c248048fee02b20f3dcc7a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4d30ded1c0c248048fee02b20f3dcc7a 2024-11-28T09:23:02,673 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4c4dc96d134549c1978c77bb8b249209 2024-11-28T09:23:02,676 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4eee4e40473c4aa8a5ee489671b6caf7 2024-11-28T09:23:02,677 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5c207240939240adb56f1dd757231951 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5c207240939240adb56f1dd757231951 2024-11-28T09:23:02,679 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4a693697bbdb45bc905aabd9bacc46aa 2024-11-28T09:23:02,680 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/44161acacce2469ba16967b5cd758938 2024-11-28T09:23:02,682 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/a3b416310e59405587cb26e6b61f4375 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/a3b416310e59405587cb26e6b61f4375 2024-11-28T09:23:02,684 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9869ed8131bd43e49c3df39eb139fe71 2024-11-28T09:23:02,685 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/9bf94fe2f6924e17977677f76cf499c4 2024-11-28T09:23:02,691 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/67162c5798dd4423b33cf41d7708d8a4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/67162c5798dd4423b33cf41d7708d8a4 2024-11-28T09:23:02,693 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/af92c76e188a47f8abf88b93a7259e7d 2024-11-28T09:23:02,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/1389b470ec224e2990d9caeead4edbe7 2024-11-28T09:23:02,696 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4151250f8f0c4de39442f51fd8e91246 2024-11-28T09:23:02,698 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/990e4ec55ef44277b7fe43657529fc7d 2024-11-28T09:23:02,705 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/3488c88d1b6d4517ae68536487332d95, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/62de3eaad347455daf304ff033f206f9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/58ba4481c9b3429780c3db284fc056dd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/df2d3afe7d7f40f9a241275202186ff7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb] to archive 2024-11-28T09:23:02,705 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:02,707 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/be08734d29dd4c13bc0db6d287f9e402 2024-11-28T09:23:02,708 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/45527bbe9f2541cd868f8a6386285020 2024-11-28T09:23:02,711 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/db749e7d99b348c88ce94b7665b5b7a7 is 50, key is test_row_0/C:col10/1732785781150/Put/seqid=0 2024-11-28T09:23:02,712 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/3488c88d1b6d4517ae68536487332d95 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/3488c88d1b6d4517ae68536487332d95 2024-11-28T09:23:02,713 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/f0fd41e87a644c239476d25e896b54b7 2024-11-28T09:23:02,714 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6e20467852be44608fa81cacc44e485e 2024-11-28T09:23:02,717 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/62de3eaad347455daf304ff033f206f9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/62de3eaad347455daf304ff033f206f9 2024-11-28T09:23:02,718 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b0759490089d4821b0ccf8485a66a9e8 2024-11-28T09:23:02,721 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/491da61e21324cde8f2512eb67bb3881 2024-11-28T09:23:02,722 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/58ba4481c9b3429780c3db284fc056dd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/58ba4481c9b3429780c3db284fc056dd 2024-11-28T09:23:02,723 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/20f442b7dd704a00a2c152752153b702 2024-11-28T09:23:02,725 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0bb569162e514fda9d927c9d9cb40e04 2024-11-28T09:23:02,727 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/df2d3afe7d7f40f9a241275202186ff7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/df2d3afe7d7f40f9a241275202186ff7
2024-11-28T09:23:02,729 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a3ec56771eea4ff89cbe2d51b3a2ee54
2024-11-28T09:23:02,731 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/930121b17e0d44abafaa70883f3cc793
2024-11-28T09:23:02,734 DEBUG [RS_COMPACTED_FILES_DISCHARGER-regionserver/363d8d38a970:0-0 {event_type=RS_COMPACTED_FILES_DISCHARGER}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7a8b0d062d344a749111256b823da0eb
2024-11-28T09:23:02,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742420_1596 (size=12151)
2024-11-28T09:23:02,753 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=238 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/db749e7d99b348c88ce94b7665b5b7a7
2024-11-28T09:23:02,754 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637
2024-11-28T09:23:02,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139
2024-11-28T09:23:02,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.
2024-11-28T09:23:02,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing
2024-11-28T09:23:02,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.
2024-11-28T09:23:02,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] handler.RSProcedureHandler(58): pid=139
java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=139
java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:02,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=139
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:02,760 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0bc6485396004155a04fe85a54d5b2b9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9 2024-11-28T09:23:02,767 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9, entries=250, sequenceid=238, filesize=16.5 K 2024-11-28T09:23:02,768 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/1389b470ec224e2990d9caeead4edbe7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7 2024-11-28T09:23:02,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7, entries=150, sequenceid=238, filesize=11.9 K 2024-11-28T09:23:02,775 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/db749e7d99b348c88ce94b7665b5b7a7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7 2024-11-28T09:23:02,781 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7, entries=150, sequenceid=238, filesize=11.9 K 2024-11-28T09:23:02,782 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for f65103e784e3773002ae1a64e3eece97 in 1001ms, sequenceid=238, compaction requested=false 2024-11-28T09:23:02,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:02,908 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:02,909 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:02,909 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:02,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:02,918 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/542eb56564074c56a77572ecf93a709f is 50, key is test_row_0/A:col10/1732785781809/Put/seqid=0 2024-11-28T09:23:02,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T09:23:02,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:02,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:02,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742421_1597 (size=12151) 2024-11-28T09:23:03,010 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785842998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785842999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,016 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785843006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785843008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,017 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785843008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785843111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,119 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785843112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,122 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785843118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785843119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,126 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785843121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785843322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785843322, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,329 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785843324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,334 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785843328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785843331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,368 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/542eb56564074c56a77572ecf93a709f 2024-11-28T09:23:03,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/f145e16be63d43229ef675048f24edef is 50, key is test_row_0/B:col10/1732785781809/Put/seqid=0 2024-11-28T09:23:03,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742422_1598 (size=12151) 2024-11-28T09:23:03,440 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/f145e16be63d43229ef675048f24edef 2024-11-28T09:23:03,451 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0489161974e74f5ebeeda2f4bb28dbfd is 50, key is test_row_0/C:col10/1732785781809/Put/seqid=0 2024-11-28T09:23:03,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742423_1599 (size=12151) 2024-11-28T09:23:03,496 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0489161974e74f5ebeeda2f4bb28dbfd 2024-11-28T09:23:03,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/542eb56564074c56a77572ecf93a709f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f 2024-11-28T09:23:03,512 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T09:23:03,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/f145e16be63d43229ef675048f24edef as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef 2024-11-28T09:23:03,517 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T09:23:03,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0489161974e74f5ebeeda2f4bb28dbfd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd 2024-11-28T09:23:03,525 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd, entries=150, sequenceid=254, filesize=11.9 K 2024-11-28T09:23:03,527 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for f65103e784e3773002ae1a64e3eece97 in 617ms, sequenceid=254, compaction requested=true 2024-11-28T09:23:03,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:03,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
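[Editor's annotation] The repeated RegionTooBusyException warnings in this stretch of the log all come from the same gate: HRegion.checkResources() rejects incoming Mutate calls while the region's memstore is over its blocking limit (evidently configured down to 512 K for this test, and in a normal deployment derived from the memstore flush size and block multiplier settings), and the pressure clears once a flush such as the pid=139 flush committing at sequenceid=254 drains the memstore. The toy Java sketch below is illustrative only, with hypothetical class and field names; it is not the HBase implementation, it simply mirrors the back-pressure loop the surrounding entries show.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;

/** Toy model of the memstore back-pressure seen in the log above (not HBase code). */
public class MemstoreBackpressureSketch {
    // Hypothetical constant: this test's limit, as reported in the log ("Over memstore limit=512.0 K").
    static final long BLOCKING_MEMSTORE_SIZE = 512 * 1024;

    /** Stand-in for org.apache.hadoop.hbase.RegionTooBusyException. */
    static class RegionTooBusyException extends IOException {
        RegionTooBusyException(String msg) { super(msg); }
    }

    private final AtomicLong memstoreSize = new AtomicLong();

    /** Mirrors the checkResources() gate appearing in the stack traces above. */
    void checkResources(String regionName) throws RegionTooBusyException {
        if (memstoreSize.get() > BLOCKING_MEMSTORE_SIZE) {
            throw new RegionTooBusyException("Over memstore limit=512.0 K, regionName=" + regionName);
        }
    }

    /** A put is only accepted while the region is under its blocking limit. */
    void put(String regionName, long cellSize) throws RegionTooBusyException {
        checkResources(regionName);
        memstoreSize.addAndGet(cellSize);
    }

    /** A flush (like the pid=139 flush at sequenceid=254) releases the pressure. */
    void flush() {
        memstoreSize.set(0);
    }

    public static void main(String[] args) {
        MemstoreBackpressureSketch region = new MemstoreBackpressureSketch();
        try {
            // ~4.7 K mutations, matching the "Mutate size: 4.7 K" calls logged above.
            for (int i = 0; i < 200; i++) {
                region.put("f65103e784e3773002ae1a64e3eece97", 4700);
            }
        } catch (RegionTooBusyException e) {
            System.out.println("rejected: " + e.getMessage());
        }
        region.flush(); // memstore drained; writes are accepted again
    }
}

This also explains why the rejections are harmless to the test: the same client connections (for example 172.17.0.2:33014 and 172.17.0.2:32964) reappear a few hundred milliseconds later with higher callIds, i.e. they retry the Mutate until the flush frees memstore space.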
2024-11-28T09:23:03,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-28T09:23:03,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-28T09:23:03,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-28T09:23:03,530 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6950 sec 2024-11-28T09:23:03,532 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 1.7000 sec 2024-11-28T09:23:03,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:03,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:03,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/922331aacb8946d0982490d7d2fae8f4 is 50, key is test_row_0/A:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785843648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,656 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785843651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785843653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,662 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785843654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785843657, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742424_1600 (size=14741) 2024-11-28T09:23:03,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/922331aacb8946d0982490d7d2fae8f4 2024-11-28T09:23:03,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/438a57fa1cf343dd94f3333293f30344 is 50, key is test_row_0/B:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742425_1601 (size=12301) 2024-11-28T09:23:03,735 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/438a57fa1cf343dd94f3333293f30344 2024-11-28T09:23:03,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/e3549aff3e6d4ab1b4adac4717a5ccaf is 50, key is test_row_0/C:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,768 WARN 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785843758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785843758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,769 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785843763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785843766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:03,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785843772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:03,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742426_1602 (size=12301) 2024-11-28T09:23:03,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/e3549aff3e6d4ab1b4adac4717a5ccaf 2024-11-28T09:23:03,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/922331aacb8946d0982490d7d2fae8f4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4 2024-11-28T09:23:03,800 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4, entries=200, sequenceid=279, filesize=14.4 K 2024-11-28T09:23:03,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/438a57fa1cf343dd94f3333293f30344 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344 2024-11-28T09:23:03,806 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344, entries=150, sequenceid=279, filesize=12.0 K 2024-11-28T09:23:03,806 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/e3549aff3e6d4ab1b4adac4717a5ccaf as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf 2024-11-28T09:23:03,812 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf, entries=150, sequenceid=279, filesize=12.0 K 2024-11-28T09:23:03,813 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f65103e784e3773002ae1a64e3eece97 in 176ms, sequenceid=279, compaction requested=true 2024-11-28T09:23:03,813 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:03,813 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:03,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:03,814 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:03,814 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:03,814 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56486 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:03,815 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:03,815 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
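The run of RegionTooBusyException warnings above comes from HRegion.checkResources rejecting writes while the region's memstore sits over its 512.0 K blocking limit and the flush recorded here is still draining it. Purely as an illustration (this is not part of the test log), a client writing to TestAcidGuarantees could absorb that backpressure with a bounded retry loop; the table, row, and column names are taken from the log, everything else below is assumed:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryPut {
    private static final int MAX_ATTEMPTS = 5;   // assumed retry budget for this sketch

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Row, family, and qualifier taken from the log above; the value is arbitrary.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;                 // initial backoff, assumed
            for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (IOException e) {
                    // A RegionTooBusyException (possibly wrapped by the client's own retry
                    // machinery) means the memstore is over its blocking limit; back off so
                    // the in-flight flush can free space, then try again.
                    if (attempt == MAX_ATTEMPTS) {
                        throw e;
                    }
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;               // simple exponential backoff
                }
            }
        }
    }
}
```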
2024-11-28T09:23:03,815 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b81390e8b207424fa25563d1ede778dc, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=55.2 K 2024-11-28T09:23:03,815 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b81390e8b207424fa25563d1ede778dc, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:03,815 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:03,816 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:03,816 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:03,816 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fbf4858f89d84e2dbcf641c4ec3f9d43, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=48.1 K 2024-11-28T09:23:03,816 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0bc6485396004155a04fe85a54d5b2b9, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785781140 2024-11-28T09:23:03,816 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fbf4858f89d84e2dbcf641c4ec3f9d43, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:03,816 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 542eb56564074c56a77572ecf93a709f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732785781799 2024-11-28T09:23:03,816 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:03,817 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 922331aacb8946d0982490d7d2fae8f4, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785782996 2024-11-28T09:23:03,817 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1389b470ec224e2990d9caeead4edbe7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785781150 2024-11-28T09:23:03,817 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f145e16be63d43229ef675048f24edef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732785781799 2024-11-28T09:23:03,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:03,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:03,818 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 438a57fa1cf343dd94f3333293f30344, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785783006 
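The ExploringCompactionPolicy entries above show the selector choosing all four eligible store files per family, with 16 files as the blocking threshold. The sketch below is only meant to name the standard HBase keys behind those numbers; the values are illustrative, not the ones this test configures:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum/maximum number of store files considered for one minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A file is eligible if it is no larger than ratio * (sum of the smaller files).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Store-file count at which new writes start being delayed
        // (the "16 blocking" figure in the log above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);

        System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
    }
}
```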
2024-11-28T09:23:03,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:03,840 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#516 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:03,840 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/5bde39d17e5c4097ad3bc6df020cbe38 is 50, key is test_row_0/A:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,843 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#517 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:03,844 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d6bbfa90606a4a638b48999b3d04ae85 is 50, key is test_row_0/B:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742428_1604 (size=12439) 2024-11-28T09:23:03,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742427_1603 (size=12439) 2024-11-28T09:23:03,897 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/5bde39d17e5c4097ad3bc6df020cbe38 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/5bde39d17e5c4097ad3bc6df020cbe38 2024-11-28T09:23:03,914 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 5bde39d17e5c4097ad3bc6df020cbe38(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:03,915 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:03,915 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=12, startTime=1732785783813; duration=0sec 2024-11-28T09:23:03,915 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:03,915 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:03,915 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:03,918 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49266 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:03,918 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:03,918 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:03,918 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b255cf01c29d44b7a705911ced76e1b5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=48.1 K 2024-11-28T09:23:03,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b255cf01c29d44b7a705911ced76e1b5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732785780428 2024-11-28T09:23:03,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting db749e7d99b348c88ce94b7665b5b7a7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=238, earliestPutTs=1732785781150 2024-11-28T09:23:03,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0489161974e74f5ebeeda2f4bb28dbfd, keycount=150, bloomtype=ROW, size=11.9 K, 
encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1732785781799 2024-11-28T09:23:03,919 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e3549aff3e6d4ab1b4adac4717a5ccaf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785783006 2024-11-28T09:23:03,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-28T09:23:03,939 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-28T09:23:03,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:03,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-28T09:23:03,943 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:03,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:03,944 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:03,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:03,951 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#518 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:03,952 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/a05798753a844375a95095b2964319c4 is 50, key is test_row_0/C:col10/1732785783006/Put/seqid=0 2024-11-28T09:23:03,980 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:03,981 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:03,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:03,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742429_1605 (size=12439) 2024-11-28T09:23:04,003 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/a05798753a844375a95095b2964319c4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a05798753a844375a95095b2964319c4 2024-11-28T09:23:04,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into a05798753a844375a95095b2964319c4(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
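The 512.0 K figure reported by RegionTooBusyException is the region's blocking memstore size, which HBase derives from the configured flush size and block multiplier. A minimal sketch, assuming a 128 K flush size and a multiplier of 4 purely so the product matches the limit seen in this log (these are not defaults):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Flush a region's memstore once it reaches this many bytes (128 K, assumed for illustration).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates once the memstore reaches multiplier * flush size.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        // 128 K * 4 = 512 K, the "Over memstore limit" value in the warnings above.
        System.out.println("blocking limit = " + blockingLimit + " bytes");
    }
}
```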
2024-11-28T09:23:04,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:04,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=12, startTime=1732785783818; duration=0sec 2024-11-28T09:23:04,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:04,007 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:04,010 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/bfbb650d243e437bb89819c5e1ec424a is 50, key is test_row_0/A:col10/1732785783654/Put/seqid=0 2024-11-28T09:23:04,038 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785844025, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785844027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785844032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785844033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:04,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785844038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742430_1606 (size=12301) 2024-11-28T09:23:04,055 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/bfbb650d243e437bb89819c5e1ec424a 2024-11-28T09:23:04,065 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fd6e737f70c748ae82a2db100e09e7a9 is 50, key is test_row_0/B:col10/1732785783654/Put/seqid=0 2024-11-28T09:23:04,096 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:04,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742431_1607 (size=12301) 2024-11-28T09:23:04,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785844139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785844140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,144 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785844142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,151 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785844144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,152 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785844147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:04,249 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,250 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,250 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,251 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,292 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d6bbfa90606a4a638b48999b3d04ae85 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d6bbfa90606a4a638b48999b3d04ae85 2024-11-28T09:23:04,300 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into d6bbfa90606a4a638b48999b3d04ae85(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:04,301 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:04,301 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=12, startTime=1732785783814; duration=0sec 2024-11-28T09:23:04,301 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:04,301 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785844345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,352 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785844345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785844346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,361 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785844353, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,364 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785844356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,403 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,408 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,408 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:04,408 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fd6e737f70c748ae82a2db100e09e7a9 2024-11-28T09:23:04,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/32e975d64e7c4ba89e1355bb062b8753 is 50, key is test_row_0/C:col10/1732785783654/Put/seqid=0 2024-11-28T09:23:04,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:04,561 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,564 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,564 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,565 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:04,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742432_1608 (size=12301) 2024-11-28T09:23:04,577 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/32e975d64e7c4ba89e1355bb062b8753 2024-11-28T09:23:04,582 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/bfbb650d243e437bb89819c5e1ec424a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a 2024-11-28T09:23:04,585 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a, entries=150, sequenceid=292, filesize=12.0 K 2024-11-28T09:23:04,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fd6e737f70c748ae82a2db100e09e7a9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9 2024-11-28T09:23:04,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9, entries=150, sequenceid=292, filesize=12.0 K 2024-11-28T09:23:04,594 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/32e975d64e7c4ba89e1355bb062b8753 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753 2024-11-28T09:23:04,599 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753, entries=150, sequenceid=292, filesize=12.0 K 2024-11-28T09:23:04,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for f65103e784e3773002ae1a64e3eece97 in 620ms, sequenceid=292, compaction requested=false 2024-11-28T09:23:04,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 
f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:04,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:04,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:04,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:04,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:04,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:04,661 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,677 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785844672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785844674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,678 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785844675, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,679 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785844677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,681 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4951e0d77b954c8d8fa126fb096230c7 is 50, key is test_row_0/A:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,688 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785844678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,717 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,718 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742433_1609 (size=14741) 2024-11-28T09:23:04,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,718 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] handler.RSProcedureHandler(58): pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,718 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=141 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4951e0d77b954c8d8fa126fb096230c7 2024-11-28T09:23:04,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=141 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:04,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/374d55a223dc4bf9bfec023eaa5ba774 is 50, key is test_row_0/B:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742434_1610 (size=12301) 2024-11-28T09:23:04,761 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/374d55a223dc4bf9bfec023eaa5ba774 2024-11-28T09:23:04,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c7e3f0fe881a45dd95fc1efeb2bc811a is 50, key is test_row_0/C:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,785 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785844779, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785844780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,786 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785844780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785844781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:04,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785844789, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742435_1611 (size=12301) 2024-11-28T09:23:04,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=319 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c7e3f0fe881a45dd95fc1efeb2bc811a 2024-11-28T09:23:04,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4951e0d77b954c8d8fa126fb096230c7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7 2024-11-28T09:23:04,815 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7, entries=200, sequenceid=319, filesize=14.4 K 2024-11-28T09:23:04,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/374d55a223dc4bf9bfec023eaa5ba774 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774 2024-11-28T09:23:04,822 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774, entries=150, sequenceid=319, filesize=12.0 K 2024-11-28T09:23:04,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c7e3f0fe881a45dd95fc1efeb2bc811a as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a 2024-11-28T09:23:04,832 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a, entries=150, sequenceid=319, filesize=12.0 K 2024-11-28T09:23:04,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f65103e784e3773002ae1a64e3eece97 in 173ms, sequenceid=319, compaction requested=true 2024-11-28T09:23:04,833 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:04,833 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:04,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:04,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:04,834 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:04,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:04,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:04,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:04,835 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:04,837 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:04,837 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:04,837 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:04,838 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/5bde39d17e5c4097ad3bc6df020cbe38, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=38.6 K 2024-11-28T09:23:04,838 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5bde39d17e5c4097ad3bc6df020cbe38, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785783006 2024-11-28T09:23:04,839 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bfbb650d243e437bb89819c5e1ec424a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732785783646 2024-11-28T09:23:04,839 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:04,839 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:04,839 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:04,839 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d6bbfa90606a4a638b48999b3d04ae85, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.2 K 2024-11-28T09:23:04,840 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4951e0d77b954c8d8fa126fb096230c7, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784031 2024-11-28T09:23:04,840 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d6bbfa90606a4a638b48999b3d04ae85, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785783006 2024-11-28T09:23:04,840 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fd6e737f70c748ae82a2db100e09e7a9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732785783646 2024-11-28T09:23:04,842 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 374d55a223dc4bf9bfec023eaa5ba774, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784032 2024-11-28T09:23:04,857 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#525 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:04,857 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ac07082e69044f14a4a03b255b8ee45f is 50, key is test_row_0/B:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,861 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#526 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:04,861 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/52120116b66b412a8e1f324035f533b0 is 50, key is test_row_0/A:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,870 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:04,871 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-28T09:23:04,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:04,871 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:04,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:04,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/cf57bbf77bef4d9e862e01ac8cc65946 is 50, key is test_row_0/A:col10/1732785784676/Put/seqid=0 2024-11-28T09:23:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742436_1612 (size=12541) 2024-11-28T09:23:04,890 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ac07082e69044f14a4a03b255b8ee45f as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ac07082e69044f14a4a03b255b8ee45f 2024-11-28T09:23:04,896 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into ac07082e69044f14a4a03b255b8ee45f(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:04,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:04,896 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785784834; duration=0sec 2024-11-28T09:23:04,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:04,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:04,896 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:04,898 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37041 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:04,898 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:04,898 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:04,898 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a05798753a844375a95095b2964319c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.2 K 2024-11-28T09:23:04,899 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a05798753a844375a95095b2964319c4, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732785783006 2024-11-28T09:23:04,900 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 32e975d64e7c4ba89e1355bb062b8753, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1732785783646 2024-11-28T09:23:04,900 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c7e3f0fe881a45dd95fc1efeb2bc811a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784032 2024-11-28T09:23:04,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742437_1613 (size=12541) 2024-11-28T09:23:04,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742438_1614 (size=12301) 2024-11-28T09:23:04,931 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#528 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:04,931 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/4fff88b6220c4afe8a8f25d1cadf09a6 is 50, key is test_row_0/C:col10/1732785784658/Put/seqid=0 2024-11-28T09:23:04,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742439_1615 (size=12541) 2024-11-28T09:23:04,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:04,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:05,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785845034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,044 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785845035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:05,048 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785845039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785845040, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,049 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785845041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785845146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785845146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,157 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785845150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785845151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785845151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,318 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/52120116b66b412a8e1f324035f533b0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/52120116b66b412a8e1f324035f533b0 2024-11-28T09:23:05,322 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 52120116b66b412a8e1f324035f533b0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:05,322 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:05,322 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785784833; duration=0sec 2024-11-28T09:23:05,322 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:05,322 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:05,325 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/cf57bbf77bef4d9e862e01ac8cc65946 2024-11-28T09:23:05,342 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/31b17153129c4c83a4d86af159117eb0 is 50, key is test_row_0/B:col10/1732785784676/Put/seqid=0 2024-11-28T09:23:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742440_1616 (size=12301) 2024-11-28T09:23:05,357 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785845355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,358 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785845356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785845358, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785845360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,366 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785845360, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,382 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/4fff88b6220c4afe8a8f25d1cadf09a6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/4fff88b6220c4afe8a8f25d1cadf09a6 2024-11-28T09:23:05,386 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 4fff88b6220c4afe8a8f25d1cadf09a6(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:05,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:05,386 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785784835; duration=0sec 2024-11-28T09:23:05,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:05,386 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:05,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785845661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785845661, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,673 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785845668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785845668, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:05,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785845669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:05,751 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/31b17153129c4c83a4d86af159117eb0 2024-11-28T09:23:05,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6535ee443abd4e1ebe0135ef0c9f09bb is 50, key is test_row_0/C:col10/1732785784676/Put/seqid=0 2024-11-28T09:23:05,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742441_1617 (size=12301) 2024-11-28T09:23:05,786 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6535ee443abd4e1ebe0135ef0c9f09bb 2024-11-28T09:23:05,793 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/cf57bbf77bef4d9e862e01ac8cc65946 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946 2024-11-28T09:23:05,799 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T09:23:05,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/31b17153129c4c83a4d86af159117eb0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0 2024-11-28T09:23:05,808 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T09:23:05,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/6535ee443abd4e1ebe0135ef0c9f09bb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb 2024-11-28T09:23:05,819 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T09:23:05,820 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f65103e784e3773002ae1a64e3eece97 in 949ms, sequenceid=330, compaction requested=false 2024-11-28T09:23:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
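The repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting new mutations while the region's memstore sits above its blocking threshold (reported here as 512.0 K, presumably the test's lowered hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier); once the flush recorded just above drains the memstore, writes proceed again. The HBase client normally retries this exception on its own, so the following Java sketch is only a simplified illustration of an explicit backoff loop around Table.put; the table, row, and column names mirror the test, and everything else (configuration, retry counts) is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryingPutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);            // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);   // give the in-flight flush time to drain the memstore
          backoffMs *= 2;
        }
      }
    }
  }
}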
2024-11-28T09:23:05,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-28T09:23:05,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-28T09:23:05,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-28T09:23:05,823 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8770 sec 2024-11-28T09:23:05,825 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 1.8820 sec 2024-11-28T09:23:06,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-28T09:23:06,047 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-28T09:23:06,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:06,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-28T09:23:06,050 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:06,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:06,051 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:06,051 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:06,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:06,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:06,178 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:06,179 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,187 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/b5bff66c375546cfb705a78ffa41d47b is 50, key is test_row_0/A:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785846184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742442_1618 (size=12301) 2024-11-28T09:23:06,191 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/b5bff66c375546cfb705a78ffa41d47b 2024-11-28T09:23:06,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785846188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785846188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785846189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785846189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,204 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fae7f0bb079d45888d7c68d992db8886 is 50, key is test_row_0/B:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,207 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T09:23:06,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:06,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,208 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742443_1619 (size=12301) 2024-11-28T09:23:06,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,210 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fae7f0bb079d45888d7c68d992db8886 2024-11-28T09:23:06,217 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/52d400e2eef04667ac542eef2475e13d is 50, key is test_row_0/C:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742444_1620 (size=12301) 2024-11-28T09:23:06,234 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=359 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/52d400e2eef04667ac542eef2475e13d 2024-11-28T09:23:06,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/b5bff66c375546cfb705a78ffa41d47b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b 2024-11-28T09:23:06,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b, entries=150, sequenceid=359, filesize=12.0 K 2024-11-28T09:23:06,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/fae7f0bb079d45888d7c68d992db8886 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886 2024-11-28T09:23:06,246 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886, entries=150, sequenceid=359, filesize=12.0 K 2024-11-28T09:23:06,247 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/52d400e2eef04667ac542eef2475e13d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d 2024-11-28T09:23:06,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d, entries=150, sequenceid=359, filesize=12.0 K 2024-11-28T09:23:06,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f65103e784e3773002ae1a64e3eece97 in 74ms, sequenceid=359, compaction requested=true 2024-11-28T09:23:06,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:06,253 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:06,253 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:06,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:06,255 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37143 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:06,255 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:06,255 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,255 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/52120116b66b412a8e1f324035f533b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.3 K 2024-11-28T09:23:06,255 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37143 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:06,255 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:06,255 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
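The selection messages above show ExploringCompactionPolicy choosing all three roughly 12 K HFiles in each store for a minor compaction ("selected 3 files of size 37143 ... with 1 in ratio"). As a reading aid, the sketch below reproduces the basic size-ratio test behind that phrase: a candidate set is "in ratio" when no file is larger than the ratio (default 1.2) times the combined size of the other files. This is a simplified standalone illustration rather than HBase's actual code; the byte sizes are chosen to be consistent with the 37143-byte total and the 12301-byte flush files reported in the log.

import java.util.List;

public class CompactionRatioSketch {
  // A candidate set is "in ratio" when every file is <= ratio * (sum of the other files).
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > ratio * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Two 12301-byte flush files plus a 12541-byte earlier file: 37143 bytes in total,
    // matching "selected 3 files of size 37143" in the log above.
    List<Long> sizes = List.of(12541L, 12301L, 12301L);
    System.out.println(filesInRatio(sizes, 1.2));  // true: similarly sized files compact together
  }
}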
2024-11-28T09:23:06,256 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ac07082e69044f14a4a03b255b8ee45f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.3 K 2024-11-28T09:23:06,256 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52120116b66b412a8e1f324035f533b0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784032 2024-11-28T09:23:06,256 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf57bbf77bef4d9e862e01ac8cc65946, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785784668 2024-11-28T09:23:06,256 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ac07082e69044f14a4a03b255b8ee45f, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784032 2024-11-28T09:23:06,256 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 31b17153129c4c83a4d86af159117eb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785784668 2024-11-28T09:23:06,257 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5bff66c375546cfb705a78ffa41d47b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:06,257 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fae7f0bb079d45888d7c68d992db8886, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:06,264 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#534 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:06,265 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/6e74cd22152d4846808ad8c8e9bca386 is 50, key is test_row_0/A:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,267 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#535 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:06,267 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d3f3218da9a64087a946039d4595a591 is 50, key is test_row_0/B:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742445_1621 (size=12643) 2024-11-28T09:23:06,291 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/6e74cd22152d4846808ad8c8e9bca386 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/6e74cd22152d4846808ad8c8e9bca386 2024-11-28T09:23:06,295 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 6e74cd22152d4846808ad8c8e9bca386(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:06,295 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:06,295 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785786252; duration=0sec 2024-11-28T09:23:06,296 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:06,296 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:06,296 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:06,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:06,296 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:06,296 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:06,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,297 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:06,297 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,297 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37143 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:06,298 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:06,298 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,298 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/4fff88b6220c4afe8a8f25d1cadf09a6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.3 K 2024-11-28T09:23:06,299 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4fff88b6220c4afe8a8f25d1cadf09a6, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=319, earliestPutTs=1732785784032 2024-11-28T09:23:06,299 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6535ee443abd4e1ebe0135ef0c9f09bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785784668 2024-11-28T09:23:06,300 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 52d400e2eef04667ac542eef2475e13d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:06,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 is 50, key is test_row_0/A:col10/1732785786186/Put/seqid=0 2024-11-28T09:23:06,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742446_1622 (size=12643) 2024-11-28T09:23:06,309 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d3f3218da9a64087a946039d4595a591 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d3f3218da9a64087a946039d4595a591 2024-11-28T09:23:06,313 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:06,313 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/35d1c41318f745178ee6ad36c7dd623c is 50, key is test_row_0/C:col10/1732785785034/Put/seqid=0 2024-11-28T09:23:06,316 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into d3f3218da9a64087a946039d4595a591(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:06,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:06,316 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785786253; duration=0sec 2024-11-28T09:23:06,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:06,316 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:06,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742448_1624 (size=12643) 2024-11-28T09:23:06,321 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/35d1c41318f745178ee6ad36c7dd623c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/35d1c41318f745178ee6ad36c7dd623c 2024-11-28T09:23:06,324 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 35d1c41318f745178ee6ad36c7dd623c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
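The FLUSH procedures earlier in this stretch (pid=140 through 143, surfaced to the client as "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed") and the compaction completions just above are driven through the public Admin API. The sketch below is a hedged illustration of the corresponding client-side calls; the table name comes from the log, while the configuration and connection handling are assumptions.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndCompactionStateSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // On this build the master runs the flush as a FlushTableProcedure with
      // FlushRegionProcedure subprocedures, matching the pid=140..143 entries above.
      admin.flush(table);
      CompactionState state = admin.getCompactionState(table);
      System.out.println("Compaction state after flush: " + state);  // NONE, MINOR, MAJOR, ...
    }
  }
}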
2024-11-28T09:23:06,325 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:06,325 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785786253; duration=0sec 2024-11-28T09:23:06,325 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:06,325 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:06,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742447_1623 (size=12297) 2024-11-28T09:23:06,340 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 2024-11-28T09:23:06,342 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785846335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785846336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785846337, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785846340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,344 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785846340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/7b73f99909dd47ac9ef20ac9ebbfe486 is 50, key is test_row_0/B:col10/1732785786186/Put/seqid=0 2024-11-28T09:23:06,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742449_1625 (size=9857) 2024-11-28T09:23:06,351 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/7b73f99909dd47ac9ef20ac9ebbfe486 2024-11-28T09:23:06,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:06,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/d6d5b143eb0145d1893d8635d1b8618f is 50, key is test_row_0/C:col10/1732785786186/Put/seqid=0 2024-11-28T09:23:06,359 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T09:23:06,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:06,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:06,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:06,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742450_1626 (size=9857) 2024-11-28T09:23:06,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785846443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,446 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785846443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785846443, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785846445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,449 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785846446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,512 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T09:23:06,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:06,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,513 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,650 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785846647, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785846648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785846648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,651 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785846648, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:06,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785846651, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,664 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T09:23:06,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:06,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,664 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:06,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:06,762 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=371 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/d6d5b143eb0145d1893d8635d1b8618f 2024-11-28T09:23:06,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 2024-11-28T09:23:06,769 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96, entries=150, sequenceid=371, filesize=12.0 K 2024-11-28T09:23:06,770 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/7b73f99909dd47ac9ef20ac9ebbfe486 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486 2024-11-28T09:23:06,773 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486, entries=100, sequenceid=371, filesize=9.6 K 2024-11-28T09:23:06,773 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/d6d5b143eb0145d1893d8635d1b8618f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f 2024-11-28T09:23:06,777 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f, entries=100, sequenceid=371, filesize=9.6 K 2024-11-28T09:23:06,777 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for f65103e784e3773002ae1a64e3eece97 in 481ms, sequenceid=371, compaction requested=false 2024-11-28T09:23:06,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:06,816 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 
363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-28T09:23:06,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:06,817 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:06,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:06,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/45e7466d0d5e46c49a29c49d448eccef is 50, key is test_row_0/A:col10/1732785786334/Put/seqid=0 2024-11-28T09:23:06,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742451_1627 (size=12301) 2024-11-28T09:23:06,954 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:06,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:06,962 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785846957, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,962 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785846958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785846958, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,963 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785846961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:06,967 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:06,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785846962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785847064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785847064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785847064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785847064, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785847068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:07,225 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/45e7466d0d5e46c49a29c49d448eccef 2024-11-28T09:23:07,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/89f44da0a96f466db5062c91f41b704a is 50, key is test_row_0/B:col10/1732785786334/Put/seqid=0 2024-11-28T09:23:07,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742452_1628 (size=12301) 2024-11-28T09:23:07,247 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/89f44da0a96f466db5062c91f41b704a 2024-11-28T09:23:07,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/fd1869e84bf9410a88e43f46b2ffbab3 is 50, key is test_row_0/C:col10/1732785786334/Put/seqid=0 2024-11-28T09:23:07,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742453_1629 (size=12301) 2024-11-28T09:23:07,267 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=399 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/fd1869e84bf9410a88e43f46b2ffbab3 2024-11-28T09:23:07,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/45e7466d0d5e46c49a29c49d448eccef as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef 2024-11-28T09:23:07,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785847268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785847268, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785847269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,272 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785847269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,276 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef, entries=150, sequenceid=399, filesize=12.0 K 2024-11-28T09:23:07,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/89f44da0a96f466db5062c91f41b704a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a 2024-11-28T09:23:07,280 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a, entries=150, sequenceid=399, filesize=12.0 K 2024-11-28T09:23:07,280 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785847273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/fd1869e84bf9410a88e43f46b2ffbab3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3 2024-11-28T09:23:07,284 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3, entries=150, sequenceid=399, filesize=12.0 K 2024-11-28T09:23:07,285 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for f65103e784e3773002ae1a64e3eece97 in 468ms, sequenceid=399, compaction requested=true 2024-11-28T09:23:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
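The flush for pid=143 above finishes (~154.31 KB in 468ms at sequenceid=399) while concurrent Mutate RPCs on the same region keep being rejected with RegionTooBusyException, because the region's blocking memstore limit (512.0 K in this run) remains exceeded until the flushed data is committed. In a normal deployment that blocking threshold is derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the small 512 K figure here is presumably a deliberately shrunken flush size so the test provokes frequent flushes and back-pressure. As a rough illustration of what a caller on the other side of those rejected RPCs might do, the following is a minimal, hypothetical Java sketch of a writer against the TestAcidGuarantees table (families A, B and C, row test_row_0 and qualifier col10, as in the flush output above) that backs off and retries when a put fails; it is not the test's own writer implementation, and the retry budget and backoff values are arbitrary assumptions rather than values taken from this run.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MemstoreBackpressureWriter {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row key and column layout mirror the flush output above
          // (key is test_row_0/A:col10, stores A, B and C).
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] { "A", "B", "C" }) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"),
                Bytes.toBytes("value"));
          }
          int maxAttempts = 5;     // assumption: arbitrary retry budget
          long backoffMs = 200L;   // assumption: arbitrary initial backoff
          for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
              table.put(put);      // single-row mutate, like the Mutate RPCs in the log
              break;
            } catch (IOException e) {
              // A RegionTooBusyException ("Over memstore limit=512.0 K" above) means the
              // region's blocking memstore size is exceeded while a flush is still in
              // progress; the client may also surface it wrapped by its own retry logic.
              if (attempt == maxAttempts) {
                throw e;
              }
              Thread.sleep(backoffMs);
              backoffMs *= 2;      // simple exponential backoff between attempts
            }
          }
        }
      }
    }

With a sketch like this, each burst of "Region is too busy" WARN/DEBUG pairs in the log corresponds to one rejected attempt on the client side, and the writes succeed again once the flush completes and the memstore drops back below the blocking limit.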
2024-11-28T09:23:07,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-28T09:23:07,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-28T09:23:07,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-28T09:23:07,287 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2350 sec 2024-11-28T09:23:07,288 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.2380 sec 2024-11-28T09:23:07,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:07,578 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:07,578 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:07,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f0d62d02a2cf4b519c8e4b77dde2224f is 50, key is test_row_0/A:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:07,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742454_1630 (size=14741) 2024-11-28T09:23:07,617 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785847612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785847612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785847613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785847617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785847618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785847718, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785847719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,726 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785847722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785847724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785847724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,928 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785847927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785847927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,932 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785847928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,932 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785847928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:07,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785847930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:07,986 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f0d62d02a2cf4b519c8e4b77dde2224f 2024-11-28T09:23:07,993 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/397e4cca9f8f4dc2aa722d7fe8a632db is 50, key is test_row_0/B:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:07,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742455_1631 (size=12301) 2024-11-28T09:23:08,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-28T09:23:08,156 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-28T09:23:08,157 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:08,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-28T09:23:08,158 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:08,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:08,159 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:08,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:08,236 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785848231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785848233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785848233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,239 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785848235, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,240 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,240 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785848236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:08,310 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T09:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,311 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
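The flush tracked as pid=144 above originates from a client call into the HBase Admin API ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"); the master stores it as a FlushTableProcedure and dispatches one FlushRegionProcedure subprocedure (pid=145) to the region server, which rejects it with the IOException above because its own MemStoreFlusher is still writing the region out. A minimal sketch of how such a table flush is typically requested from a client, assuming the standard org.apache.hadoop.hbase.client API, is shown below; it is illustrative only and not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table. On the master this
      // surfaces as a FlushTableProcedure with per-region subprocedures,
      // comparable to pid=144/145 in the records above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

The master keeps re-dispatching pid=145 until the in-flight flush completes, which is why the same "NOT flushing ... as already flushing" / "Unable to complete flush" sequence repeats in the records that follow.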
2024-11-28T09:23:08,311 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,396 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/397e4cca9f8f4dc2aa722d7fe8a632db 2024-11-28T09:23:08,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/bcf4dc8f55b94b7797702a2220e8410e is 50, key is test_row_0/C:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:08,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742456_1632 (size=12301) 2024-11-28T09:23:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:08,463 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,463 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T09:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,464 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,616 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,616 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T09:23:08,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:08,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,616 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785848739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785848740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,745 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785848740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785848743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,747 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:08,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785848744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:08,768 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,769 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T09:23:08,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:08,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,769 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
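The repeated RegionTooBusyException rejections above come from HRegion.checkResources, which refuses new mutations once the region's memstore exceeds its blocking limit (512 K in this test; the threshold is normally derived from hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier) until a flush drains it. The HBase client already retries such rejections internally with backoff, so the sketch below is only an illustration of the expected caller behaviour; it uses the standard client API and the row/family/qualifier names visible in the log, while the value bytes and retry parameters are made up.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 50;
      for (int attempt = 0; attempt < 10; attempt++) {
        try {
          table.put(put);
          break; // write accepted
        } catch (RegionTooBusyException e) {
          // The region's memstore is above its blocking limit; wait for the
          // in-progress flush to drain it, then retry with a longer pause.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5000);
        }
      }
    }
  }
}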
2024-11-28T09:23:08,769 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:08,807 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=410 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/bcf4dc8f55b94b7797702a2220e8410e 2024-11-28T09:23:08,811 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f0d62d02a2cf4b519c8e4b77dde2224f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f 2024-11-28T09:23:08,814 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f, entries=200, sequenceid=410, filesize=14.4 K 2024-11-28T09:23:08,815 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/397e4cca9f8f4dc2aa722d7fe8a632db as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db 2024-11-28T09:23:08,818 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db, entries=150, sequenceid=410, filesize=12.0 K 2024-11-28T09:23:08,818 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/bcf4dc8f55b94b7797702a2220e8410e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e 2024-11-28T09:23:08,821 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e, entries=150, sequenceid=410, filesize=12.0 K 2024-11-28T09:23:08,822 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f65103e784e3773002ae1a64e3eece97 in 1244ms, sequenceid=410, compaction requested=true 2024-11-28T09:23:08,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:08,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:08,822 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:08,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:08,823 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:08,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:08,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:08,823 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:08,823 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:08,823 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47102 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:08,823 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 51982 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:08,824 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:08,824 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
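The "Exploring compaction algorithm has selected 4 files ... in ratio" lines refer to ExploringCompactionPolicy's ratio test: a candidate permutation is acceptable when no single file is larger than the combined size of the other files multiplied by the compaction ratio (hbase.hstore.compaction.ratio, 1.2 by default). The sketch below is a simplified, self-contained model of that check, not HBase's implementation; the file sizes are illustrative stand-ins for the roughly 12-14 K store files listed in the surrounding records.

import java.util.List;

public class RatioCheckExample {

  // A candidate set is "in ratio" if no single file exceeds the combined size
  // of the other files times the compaction ratio.
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Illustrative sizes in bytes, on the order of the candidates above.
    List<Long> candidate = List.of(12_600L, 12_300L, 12_300L, 14_700L);
    System.out.println(filesInRatio(candidate, 1.2)); // true: all four can compact together
  }
}

Among the permutations that pass this test, the policy roughly prefers the one covering the most files at the smallest total size, which is why all four eligible files end up in a single minor compaction for each store here.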
2024-11-28T09:23:08,824 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/6e74cd22152d4846808ad8c8e9bca386, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=50.8 K 2024-11-28T09:23:08,824 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d3f3218da9a64087a946039d4595a591, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=46.0 K 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e74cd22152d4846808ad8c8e9bca386, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d3f3218da9a64087a946039d4595a591, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dc2f6d01d4b40f1bfa650ee19fd4b96, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732785786186 2024-11-28T09:23:08,824 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b73f99909dd47ac9ef20ac9ebbfe486, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732785786186 2024-11-28T09:23:08,825 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 89f44da0a96f466db5062c91f41b704a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732785786334 2024-11-28T09:23:08,825 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
45e7466d0d5e46c49a29c49d448eccef, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732785786334 2024-11-28T09:23:08,825 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0d62d02a2cf4b519c8e4b77dde2224f, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786956 2024-11-28T09:23:08,825 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 397e4cca9f8f4dc2aa722d7fe8a632db, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786958 2024-11-28T09:23:08,833 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#546 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:08,833 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#547 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:08,833 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0349015aea3f437085bdf928236c35af is 50, key is test_row_0/A:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:08,834 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4e2b3afd1bc14e30bb8c52a62e427c70 is 50, key is test_row_0/B:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:08,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742457_1633 (size=12779) 2024-11-28T09:23:08,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742458_1634 (size=12779) 2024-11-28T09:23:08,922 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:08,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-28T09:23:08,922 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
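Editor's note: the PressureAwareThroughputController entries above report an average throughput of about 3.28 MB/second against a total limit of 50.00 MB/second. As a hedged sketch only, the bounds this controller works between can be tuned server-side; the property names and values below are assumptions to be verified against the HBase release in use, they are not taken from this log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputBounds {
      public static void main(String[] args) {
        // Server-side settings; in a deployment they belong in hbase-site.xml,
        // here they are set programmatically as one might for a mini-cluster test.
        Configuration conf = HBaseConfiguration.create();
        // Bounds (bytes/sec) the pressure-aware controller interpolates between.
        // Key names are assumptions; double-check them for the version in use.
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        System.out.println("compaction throughput bounds: "
            + conf.get("hbase.hstore.compaction.throughput.lower.bound") + " .. "
            + conf.get("hbase.hstore.compaction.throughput.higher.bound") + " bytes/sec");
      }
    }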
2024-11-28T09:23:08,923 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:08,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:08,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/09f36efd477f4bb19d01c151b64b8697 is 50, key is test_row_0/A:col10/1732785787603/Put/seqid=0 2024-11-28T09:23:08,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742459_1635 (size=12301) 2024-11-28T09:23:09,242 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/0349015aea3f437085bdf928236c35af as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0349015aea3f437085bdf928236c35af 2024-11-28T09:23:09,246 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4e2b3afd1bc14e30bb8c52a62e427c70 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4e2b3afd1bc14e30bb8c52a62e427c70 2024-11-28T09:23:09,247 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 0349015aea3f437085bdf928236c35af(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
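Editor's note: the flush above drains the region's three memstores into new HFiles under .tmp. Two server-side properties govern memstore sizing: the region flush size and the block multiplier, whose product is the blocking limit behind the "Over memstore limit=512.0 K" rejections seen later in this excerpt. The values TestAcidGuarantees actually uses are not visible here, so the numbers in the sketch below are assumptions chosen only to reproduce a 512 K limit; the property names are the standard hbase-site.xml keys.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: a 128 K flush size with a multiplier of 4
        // yields the 512 K blocking limit reported later in this log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 1);
        System.out.println("memstore blocking limit = " + blockingLimit + " bytes");
      }
    }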
2024-11-28T09:23:09,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:09,247 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=12, startTime=1732785788822; duration=0sec 2024-11-28T09:23:09,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:09,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:09,247 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:09,249 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47102 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:09,249 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:09,249 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:09,249 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/35d1c41318f745178ee6ad36c7dd623c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=46.0 K 2024-11-28T09:23:09,250 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 35d1c41318f745178ee6ad36c7dd623c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=359, earliestPutTs=1732785785034 2024-11-28T09:23:09,250 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6d5b143eb0145d1893d8635d1b8618f, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=371, earliestPutTs=1732785786186 2024-11-28T09:23:09,250 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of 
f65103e784e3773002ae1a64e3eece97 into 4e2b3afd1bc14e30bb8c52a62e427c70(size=12.5 K), total size for store is 12.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:09,250 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:09,250 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=12, startTime=1732785788822; duration=0sec 2024-11-28T09:23:09,250 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:09,250 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:09,251 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fd1869e84bf9410a88e43f46b2ffbab3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1732785786334 2024-11-28T09:23:09,251 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bcf4dc8f55b94b7797702a2220e8410e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786958 2024-11-28T09:23:09,257 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#549 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:09,257 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/696bae8146624d14aed0d187659edbd4 is 50, key is test_row_0/C:col10/1732785786958/Put/seqid=0 2024-11-28T09:23:09,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742460_1636 (size=12779) 2024-11-28T09:23:09,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:09,336 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/09f36efd477f4bb19d01c151b64b8697 2024-11-28T09:23:09,343 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ec147b41ab1a4141a596497dd39f2822 is 50, key is test_row_0/B:col10/1732785787603/Put/seqid=0 2024-11-28T09:23:09,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742461_1637 (size=12301) 2024-11-28T09:23:09,348 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ec147b41ab1a4141a596497dd39f2822 2024-11-28T09:23:09,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/2727b0d37d3d4526a2c076fd5a39268d is 50, key is test_row_0/C:col10/1732785787603/Put/seqid=0 2024-11-28T09:23:09,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742462_1638 (size=12301) 2024-11-28T09:23:09,358 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=435 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/2727b0d37d3d4526a2c076fd5a39268d 2024-11-28T09:23:09,362 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/09f36efd477f4bb19d01c151b64b8697 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697 2024-11-28T09:23:09,365 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697, entries=150, sequenceid=435, filesize=12.0 K 2024-11-28T09:23:09,365 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ec147b41ab1a4141a596497dd39f2822 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822 2024-11-28T09:23:09,370 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822, entries=150, sequenceid=435, filesize=12.0 K 2024-11-28T09:23:09,370 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/2727b0d37d3d4526a2c076fd5a39268d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d 2024-11-28T09:23:09,374 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d, entries=150, sequenceid=435, filesize=12.0 K 2024-11-28T09:23:09,375 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=0 B/0 for f65103e784e3773002ae1a64e3eece97 in 453ms, sequenceid=435, compaction requested=false 2024-11-28T09:23:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
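Editor's note: the flush at sequenceid=435 completes here in 453 ms and is reported back to the master as pid=145/144 just below. The client-side counterpart is a synchronous Admin.flush call, roughly as sketched; connection setup is assumed and only the table name is taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a flush procedure on the master and blocks until it finishes,
          // matching the "Operation: FLUSH ... procId: 144 completed" entry later in the log.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }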
2024-11-28T09:23:09,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-11-28T09:23:09,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-11-28T09:23:09,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-11-28T09:23:09,378 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2170 sec 2024-11-28T09:23:09,379 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 1.2210 sec 2024-11-28T09:23:09,666 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/696bae8146624d14aed0d187659edbd4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/696bae8146624d14aed0d187659edbd4 2024-11-28T09:23:09,670 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 696bae8146624d14aed0d187659edbd4(size=12.5 K), total size for store is 24.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
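Editor's note: the entries that follow show client writes being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 drains the region. The standard HBase client absorbs these with its own retry and backoff; purely as an illustration (not the test's actual write path), a manual backoff loop around a single Put could look like the sketch below. Row, family and qualifier are taken from the log; the loop bounds are arbitrary.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (IOException e) {
              // Depending on client retry settings the RegionTooBusyException may
              // surface directly or wrapped; treat anything else as a real failure.
              boolean tooBusy = e instanceof RegionTooBusyException
                  || e.getCause() instanceof RegionTooBusyException;
              if (!tooBusy) {
                throw e;
              }
              // Region is over its memstore blocking limit; wait for the flush to drain it.
              Thread.sleep(backoffMs);
              backoffMs *= 2;
            }
          }
        }
      }
    }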
2024-11-28T09:23:09,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:09,670 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=12, startTime=1732785788823; duration=0sec 2024-11-28T09:23:09,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:09,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:09,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:09,759 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:09,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:09,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:09,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:09,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:09,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:09,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/94172ab90e464842b4e66965c91126a0 is 50, key is test_row_0/A:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:09,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742463_1639 (size=14741) 2024-11-28T09:23:09,792 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785849788, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,795 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785849790, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785849791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785849791, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,801 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785849792, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,898 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785849893, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 211 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785849896, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785849899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,909 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,909 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785849902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785849902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785850100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785850101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785850105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785850110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,113 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785850110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/94172ab90e464842b4e66965c91126a0 2024-11-28T09:23:10,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d1cf62a1071348f5acd1b1c00e2dc283 is 50, key is test_row_0/B:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:10,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742464_1640 (size=12301) 2024-11-28T09:23:10,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-28T09:23:10,262 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed 2024-11-28T09:23:10,264 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:10,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees 2024-11-28T09:23:10,265 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; 
org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:10,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:10,266 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=146, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:10,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:10,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:10,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785850405, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,410 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785850407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,417 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,417 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,417 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-28T09:23:10,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785850415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,417 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:10,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785850416, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785850417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:10,570 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-28T09:23:10,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:10,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] handler.RSProcedureHandler(58): pid=147 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=147 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=147 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:10,592 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d1cf62a1071348f5acd1b1c00e2dc283 2024-11-28T09:23:10,598 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7cb61df0f6494b868822cd1b2f7e6082 is 50, key is test_row_0/C:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:10,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742465_1641 (size=12301) 2024-11-28T09:23:10,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=449 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7cb61df0f6494b868822cd1b2f7e6082 2024-11-28T09:23:10,605 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/94172ab90e464842b4e66965c91126a0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0 2024-11-28T09:23:10,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0, entries=200, sequenceid=449, filesize=14.4 K 2024-11-28T09:23:10,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/d1cf62a1071348f5acd1b1c00e2dc283 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283 2024-11-28T09:23:10,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283, entries=150, sequenceid=449, filesize=12.0 K 2024-11-28T09:23:10,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/7cb61df0f6494b868822cd1b2f7e6082 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082 2024-11-28T09:23:10,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082, entries=150, sequenceid=449, filesize=12.0 K 2024-11-28T09:23:10,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for f65103e784e3773002ae1a64e3eece97 in 857ms, sequenceid=449, compaction requested=true 2024-11-28T09:23:10,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:10,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:10,616 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:10,616 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:10,617 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:10,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:10,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:10,617 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:10,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:10,617 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39821 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:10,617 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:10,617 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:10,617 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:10,617 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,617 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,618 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0349015aea3f437085bdf928236c35af, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=38.9 K 2024-11-28T09:23:10,618 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4e2b3afd1bc14e30bb8c52a62e427c70, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.5 K 2024-11-28T09:23:10,618 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 0349015aea3f437085bdf928236c35af, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786958 2024-11-28T09:23:10,618 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e2b3afd1bc14e30bb8c52a62e427c70, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786958 2024-11-28T09:23:10,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ec147b41ab1a4141a596497dd39f2822, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732785787603 2024-11-28T09:23:10,619 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09f36efd477f4bb19d01c151b64b8697, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732785787603 2024-11-28T09:23:10,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d1cf62a1071348f5acd1b1c00e2dc283, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789756 2024-11-28T09:23:10,620 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 94172ab90e464842b4e66965c91126a0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789755 2024-11-28T09:23:10,626 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#555 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:10,627 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/6e2a5dc463f54fde98b8f852dd37976d is 50, key is test_row_0/B:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:10,627 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#556 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:10,628 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/d6f3623b4f5c462fb24184f943512169 is 50, key is test_row_0/A:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:10,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742466_1642 (size=12881) 2024-11-28T09:23:10,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742467_1643 (size=12881) 2024-11-28T09:23:10,722 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,723 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=147 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:10,723 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:10,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:10,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/011bcbd2021d481ead85eb47f1c6a848 is 50, key is test_row_0/A:col10/1732785789791/Put/seqid=0 2024-11-28T09:23:10,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to 
blk_1073742468_1644 (size=12301) 2024-11-28T09:23:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:10,918 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:10,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:10,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785850927, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785850928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,937 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785850928, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785850929, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:10,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:10,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785850935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,036 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/6e2a5dc463f54fde98b8f852dd37976d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/6e2a5dc463f54fde98b8f852dd37976d 2024-11-28T09:23:11,037 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/d6f3623b4f5c462fb24184f943512169 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/d6f3623b4f5c462fb24184f943512169 2024-11-28T09:23:11,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785851036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,041 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 6e2a5dc463f54fde98b8f852dd37976d(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:11,041 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:11,041 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785790616; duration=0sec 2024-11-28T09:23:11,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785851038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,041 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:11,041 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:11,041 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:11,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785851039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785851039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785851039, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,043 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37381 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:11,043 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:11,043 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:11,043 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/696bae8146624d14aed0d187659edbd4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.5 K 2024-11-28T09:23:11,043 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into d6f3623b4f5c462fb24184f943512169(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
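The compaction selections above each fold three store files of roughly 12-14 K into a single ~12.6 K file per column family. HBase's exploring compaction policy only accepts such a candidate set when every file is no larger than the combined size of the other files scaled by the configured ratio, which appears to be what the "1 in ratio" note in the selection lines is counting. The standalone Java sketch below illustrates that ratio test on sizes matching the store B selection logged above; it is a simplified illustration, not the HBase implementation, and the 1.2 value is only the usual default of hbase.hstore.compaction.ratio rather than a setting read from this test run.

// Simplified, standalone illustration of the "files in ratio" acceptance test behind
// HBase's exploring compaction policy. Not HBase code; sizes approximate store B above.
public class CompactionRatioSketch {

    // A candidate set is acceptable when every file is at most `ratio` times
    // the combined size of the remaining files in the set.
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the 12.5 K + 12.0 K + 12.0 K selection reported for store B (total 37381 bytes).
        long[] storeBFiles = {12800L, 12300L, 12281L};
        double assumedDefaultRatio = 1.2; // usual default of hbase.hstore.compaction.ratio (assumption)
        System.out.println("selection in ratio: " + filesInRatio(storeBFiles, assumedDefaultRatio));
    }
}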
2024-11-28T09:23:11,043 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:11,043 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785790616; duration=0sec 2024-11-28T09:23:11,043 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:11,043 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:11,044 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 696bae8146624d14aed0d187659edbd4, keycount=150, bloomtype=ROW, size=12.5 K, encoding=NONE, compression=NONE, seqNum=410, earliestPutTs=1732785786958 2024-11-28T09:23:11,044 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2727b0d37d3d4526a2c076fd5a39268d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=435, earliestPutTs=1732785787603 2024-11-28T09:23:11,044 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cb61df0f6494b868822cd1b2f7e6082, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789756 2024-11-28T09:23:11,049 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#558 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:11,050 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/9a95772817e44005902d815a0df6bee6 is 50, key is test_row_0/C:col10/1732785789758/Put/seqid=0 2024-11-28T09:23:11,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742469_1645 (size=12881) 2024-11-28T09:23:11,056 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/9a95772817e44005902d815a0df6bee6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9a95772817e44005902d815a0df6bee6 2024-11-28T09:23:11,059 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 9a95772817e44005902d815a0df6bee6(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
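The RegionTooBusyException warnings that dominate this stretch of the log are the region server rejecting writes while the region's memstore sits above its blocking limit, reported here as 512.0 K; that limit is normally derived from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the stock HBase client usually retries such rejections on its own before surfacing a failure to application code. The Java sketch below shows the shape of the writes being rejected, with an explicit catch-and-backoff loop added purely for illustration; the table, row, family, and qualifier are taken from the log, but the connection setup, retry count, and backoff are assumptions, not part of TestAcidGuarantees.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative client-side sketch of the kind of mutation being rejected above.
// The explicit retry loop is an assumption for demonstration; the normal HBase
// client already retries RegionTooBusyException internally per its own settings.
public class BusyRegionPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);                       // write accepted once the memstore drains
                    break;
                } catch (RegionTooBusyException busy) {   // memstore above its blocking limit
                    if (attempt >= 5) {
                        throw busy;                       // give up after a few tries (arbitrary cap)
                    }
                    Thread.sleep(100L * attempt);         // simple linear backoff (assumption)
                }
            }
        }
    }
}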
2024-11-28T09:23:11,059 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:11,059 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785790617; duration=0sec 2024-11-28T09:23:11,059 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:11,059 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:11,133 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/011bcbd2021d481ead85eb47f1c6a848 2024-11-28T09:23:11,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/cacfb8cc17af46d7b6183cc64ec30507 is 50, key is test_row_0/B:col10/1732785789791/Put/seqid=0 2024-11-28T09:23:11,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742470_1646 (size=12301) 2024-11-28T09:23:11,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785851242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,246 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785851242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785851243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,248 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785851243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785851244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:11,543 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/cacfb8cc17af46d7b6183cc64ec30507 2024-11-28T09:23:11,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785851544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 207 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785851548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/ead684eb47a64015aaab12cb32275570 is 50, key is test_row_0/C:col10/1732785789791/Put/seqid=0 2024-11-28T09:23:11,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785851550, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785851551, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,554 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785851553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:11,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742471_1647 (size=12301) 2024-11-28T09:23:11,957 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=474 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/ead684eb47a64015aaab12cb32275570 2024-11-28T09:23:11,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/011bcbd2021d481ead85eb47f1c6a848 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848 2024-11-28T09:23:11,963 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:23:11,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/cacfb8cc17af46d7b6183cc64ec30507 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507 2024-11-28T09:23:11,967 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:23:11,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 
{event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/ead684eb47a64015aaab12cb32275570 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570 2024-11-28T09:23:11,971 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570, entries=150, sequenceid=474, filesize=12.0 K 2024-11-28T09:23:11,972 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for f65103e784e3773002ae1a64e3eece97 in 1249ms, sequenceid=474, compaction requested=false 2024-11-28T09:23:11,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:11,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:11,972 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=147}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=147 2024-11-28T09:23:11,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=147 2024-11-28T09:23:11,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-28T09:23:11,974 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7070 sec 2024-11-28T09:23:11,977 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=146, table=TestAcidGuarantees in 1.7120 sec 2024-11-28T09:23:12,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:12,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:12,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:12,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:12,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,055 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:12,055 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,059 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/278d7916f51e4b4e8c9fa96d80a4767a is 50, key is test_row_0/A:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:12,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742472_1648 (size=17181) 2024-11-28T09:23:12,089 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785852083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785852085, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785852086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,094 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785852086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,095 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785852088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,194 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785852190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785852195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785852196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785852196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,199 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785852196, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-28T09:23:12,369 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-28T09:23:12,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:12,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees 2024-11-28T09:23:12,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:12,372 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:12,372 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=148, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:12,372 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:12,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785852397, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785852399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785852400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785852400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,407 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785852401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,463 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/278d7916f51e4b4e8c9fa96d80a4767a 2024-11-28T09:23:12,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/289dede91d934803b75a2809ec422fdb is 50, key is test_row_0/B:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:12,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:12,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742473_1649 (size=12301) 2024-11-28T09:23:12,523 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,523 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T09:23:12,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:12,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,524 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:12,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:12,676 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T09:23:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:12,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,705 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785852702, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785852707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785852707, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785852708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,712 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:12,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785852708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,828 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T09:23:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,829 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] handler.RSProcedureHandler(58): pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=149 java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=149 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:12,873 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/289dede91d934803b75a2809ec422fdb 2024-11-28T09:23:12,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/8f190a55fcff4558a555cdcec7546a97 is 50, key is test_row_0/C:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:12,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742474_1650 (size=12301) 2024-11-28T09:23:12,883 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=490 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/8f190a55fcff4558a555cdcec7546a97 2024-11-28T09:23:12,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/278d7916f51e4b4e8c9fa96d80a4767a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a 2024-11-28T09:23:12,891 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a, entries=250, sequenceid=490, filesize=16.8 K 2024-11-28T09:23:12,891 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/289dede91d934803b75a2809ec422fdb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb 2024-11-28T09:23:12,894 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb, entries=150, sequenceid=490, filesize=12.0 K 2024-11-28T09:23:12,896 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/8f190a55fcff4558a555cdcec7546a97 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97 2024-11-28T09:23:12,898 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97, entries=150, sequenceid=490, filesize=12.0 K 2024-11-28T09:23:12,899 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for f65103e784e3773002ae1a64e3eece97 in 845ms, sequenceid=490, compaction requested=true 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:12,899 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:12,899 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:12,899 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:12,899 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:12,901 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42363 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:12,901 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37483 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:12,901 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:12,901 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:12,901 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,901 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,901 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/6e2a5dc463f54fde98b8f852dd37976d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.6 K 2024-11-28T09:23:12,901 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/d6f3623b4f5c462fb24184f943512169, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=41.4 K 2024-11-28T09:23:12,902 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 6e2a5dc463f54fde98b8f852dd37976d, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789756 2024-11-28T09:23:12,902 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d6f3623b4f5c462fb24184f943512169, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789756 2024-11-28T09:23:12,902 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting cacfb8cc17af46d7b6183cc64ec30507, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785789790 2024-11-28T09:23:12,902 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 289dede91d934803b75a2809ec422fdb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:12,903 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 011bcbd2021d481ead85eb47f1c6a848, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785789790 2024-11-28T09:23:12,903 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 278d7916f51e4b4e8c9fa96d80a4767a, keycount=250, bloomtype=ROW, size=16.8 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:12,909 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#564 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:12,910 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/961b4d3ca6e347c4b84b72dc73fbbf28 is 50, key is test_row_0/B:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:12,910 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#565 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:12,911 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/34bc3048d6c0458a9b3bec56cee22f30 is 50, key is test_row_0/A:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:12,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742475_1651 (size=12983) 2024-11-28T09:23:12,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742476_1652 (size=12983) 2024-11-28T09:23:12,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:12,981 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:12,981 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=149 2024-11-28T09:23:12,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:12,981 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:12,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:12,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/307e3fd0e4fd47f7bb33004486c6e022 is 50, key is 
test_row_0/A:col10/1732785792086/Put/seqid=0 2024-11-28T09:23:12,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742477_1653 (size=12301) 2024-11-28T09:23:13,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:13,212 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. as already flushing 2024-11-28T09:23:13,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785853225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785853226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785853226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785853227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785853228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,321 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/961b4d3ca6e347c4b84b72dc73fbbf28 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/961b4d3ca6e347c4b84b72dc73fbbf28 2024-11-28T09:23:13,322 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/34bc3048d6c0458a9b3bec56cee22f30 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/34bc3048d6c0458a9b3bec56cee22f30 2024-11-28T09:23:13,325 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 961b4d3ca6e347c4b84b72dc73fbbf28(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:13,325 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:13,325 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785792899; duration=0sec 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:13,326 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 34bc3048d6c0458a9b3bec56cee22f30(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:13,326 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785792899; duration=0sec 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:13,326 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37483 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:13,327 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 2024-11-28T09:23:13,327 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
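The RegionTooBusyException entries in this stretch of the log are the region server refusing Mutate calls while region f65103e784e3773002ae1a64e3eece97 is over its memstore blocking limit of 512.0 K; in HBase that limit is the per-region flush size multiplied by the block multiplier, and writes are rejected until the in-flight flush brings the memstore back under it. A minimal sketch of the two settings involved, assuming a 128 K flush size with the default multiplier of 4 so that their product matches the 512 K limit quoted above (the actual test configuration is not shown in this log):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values for illustration: 128 K flush size x multiplier 4 = 512 K,
            // the blocking limit reported in the RegionTooBusyException messages.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
                    * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            System.out.println("Blocking memstore size: " + blockingLimit + " bytes"); // 524288 = 512 K
        }
    }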
2024-11-28T09:23:13,327 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9a95772817e44005902d815a0df6bee6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.6 K 2024-11-28T09:23:13,327 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a95772817e44005902d815a0df6bee6, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=449, earliestPutTs=1732785789756 2024-11-28T09:23:13,328 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting ead684eb47a64015aaab12cb32275570, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=474, earliestPutTs=1732785789790 2024-11-28T09:23:13,329 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f190a55fcff4558a555cdcec7546a97, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:13,333 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#567 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:13,333 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/8e205dd3e91c4e7abe4c63ca53f185a7 is 50, key is test_row_0/C:col10/1732785790934/Put/seqid=0 2024-11-28T09:23:13,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742478_1654 (size=12983) 2024-11-28T09:23:13,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785853335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785853335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785853335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785853335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,342 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785853335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,390 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/307e3fd0e4fd47f7bb33004486c6e022 2024-11-28T09:23:13,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4b468de7330e4b358b8cc1e58c89f630 is 50, key is test_row_0/B:col10/1732785792086/Put/seqid=0 2024-11-28T09:23:13,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742479_1655 (size=12301) 2024-11-28T09:23:13,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:13,549 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785853542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785853543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785853544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785853544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,550 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785853544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,741 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/8e205dd3e91c4e7abe4c63ca53f185a7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8e205dd3e91c4e7abe4c63ca53f185a7 2024-11-28T09:23:13,744 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 8e205dd3e91c4e7abe4c63ca53f185a7(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
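At this point all three stores (A, B and C) of f65103e784e3773002ae1a64e3eece97 have been rewritten by minor compactions, each collapsing three roughly 12 K HFiles into a single ~12.7 K file. These compactions are scheduled automatically by CompactSplit after the flushes; an equivalent request can also be made through the client Admin API. A minimal sketch, assuming a connection to the same cluster (only the table name is taken from the log, everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                admin.flush(table);        // queue a flush, like the FlushTableProcedure tracked as pid=148 in this log
                admin.majorCompact(table); // ask each region to compact all of its store files
            }
        }
    }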
2024-11-28T09:23:13,744 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:13,744 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785792899; duration=0sec 2024-11-28T09:23:13,745 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:13,745 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:13,802 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4b468de7330e4b358b8cc1e58c89f630 2024-11-28T09:23:13,809 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c01a69e79a71436eb9a598de2d370c6b is 50, key is test_row_0/C:col10/1732785792086/Put/seqid=0 2024-11-28T09:23:13,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742480_1656 (size=12301) 2024-11-28T09:23:13,813 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=513 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c01a69e79a71436eb9a598de2d370c6b 2024-11-28T09:23:13,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/307e3fd0e4fd47f7bb33004486c6e022 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022 2024-11-28T09:23:13,820 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022, entries=150, sequenceid=513, filesize=12.0 K 2024-11-28T09:23:13,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/4b468de7330e4b358b8cc1e58c89f630 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630 2024-11-28T09:23:13,823 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630, entries=150, sequenceid=513, filesize=12.0 K 2024-11-28T09:23:13,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/c01a69e79a71436eb9a598de2d370c6b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b 2024-11-28T09:23:13,827 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b, entries=150, sequenceid=513, filesize=12.0 K 2024-11-28T09:23:13,827 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for f65103e784e3773002ae1a64e3eece97 in 846ms, sequenceid=513, compaction requested=false 2024-11-28T09:23:13,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:13,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
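The Mutate calls being rejected throughout this log are single-row puts against row test_row_0, families A/B/C, qualifier col10. RegionTooBusyException is retryable, so the client keeps re-submitting the put until the flush completes or its retry budget is exhausted; each rejected attempt appears server-side as one WARN from HRegion plus one DEBUG from CallRunner. A minimal sketch of such a writer, assuming default client settings apart from an explicitly chosen retry budget (the values are illustrative, not taken from the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Illustrative retry budget; the client retries RegionTooBusyException internally.
            conf.setInt("hbase.client.retries.number", 15);
            conf.setLong("hbase.client.pause", 100L); // ms between attempts
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                byte[] value = Bytes.toBytes("value"); // hypothetical payload
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put); // retried while the region is over its memstore blocking limit
            }
        }
    }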
2024-11-28T09:23:13,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=149}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=149 2024-11-28T09:23:13,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=149 2024-11-28T09:23:13,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-28T09:23:13,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4560 sec 2024-11-28T09:23:13,831 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=148, table=TestAcidGuarantees in 1.4600 sec 2024-11-28T09:23:13,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:13,854 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:13,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:13,858 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f87f6658233d425ba19ad63f9689b8e0 is 50, key is test_row_0/A:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:13,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742481_1657 (size=14741) 2024-11-28T09:23:13,884 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785853875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785853882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785853882, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,889 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785853883, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785853884, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,988 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785853985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,989 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785853985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785853990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785853990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:13,995 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:13,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785853990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,195 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785854190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 248 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785854191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785854195, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 256 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785854197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785854198, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f87f6658233d425ba19ad63f9689b8e0 2024-11-28T09:23:14,268 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/b9c267e32e6a478488a646435715c60f is 50, key is test_row_0/B:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:14,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742482_1658 (size=12301) 2024-11-28T09:23:14,273 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/b9c267e32e6a478488a646435715c60f 2024-11-28T09:23:14,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b7881058214c4bf0a50df9e84991cefc is 50, key is test_row_0/C:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:14,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742483_1659 (size=12301) 2024-11-28T09:23:14,463 DEBUG [Thread-2398 {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b7f20c4 to 127.0.0.1:53251 2024-11-28T09:23:14,463 DEBUG [Thread-2398 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:14,463 DEBUG [Thread-2400 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5f7c40ba to 127.0.0.1:53251 2024-11-28T09:23:14,463 DEBUG [Thread-2400 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:14,465 DEBUG [Thread-2404 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0f2423f3 to 127.0.0.1:53251 2024-11-28T09:23:14,465 DEBUG [Thread-2404 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:14,467 DEBUG [Thread-2406 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x184771cf to 127.0.0.1:53251 2024-11-28T09:23:14,467 DEBUG [Thread-2406 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:14,468 DEBUG [Thread-2402 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x41b0e7b6 to 127.0.0.1:53251 2024-11-28T09:23:14,468 DEBUG [Thread-2402 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:14,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=148 2024-11-28T09:23:14,475 INFO [Thread-2397 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 148 completed 2024-11-28T09:23:14,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32982 deadline: 1732785854496, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 245 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32966 deadline: 1732785854498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,501 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33014 deadline: 1732785854501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,503 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:32964 deadline: 1732785854503, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,506 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:14,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:33034 deadline: 1732785854506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:14,683 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=530 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b7881058214c4bf0a50df9e84991cefc 2024-11-28T09:23:14,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/f87f6658233d425ba19ad63f9689b8e0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0 2024-11-28T09:23:14,688 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0, entries=200, sequenceid=530, filesize=14.4 K 2024-11-28T09:23:14,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/b9c267e32e6a478488a646435715c60f as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f 2024-11-28T09:23:14,691 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f, entries=150, sequenceid=530, filesize=12.0 K 2024-11-28T09:23:14,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/b7881058214c4bf0a50df9e84991cefc as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc 2024-11-28T09:23:14,694 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc, entries=150, sequenceid=530, filesize=12.0 K 2024-11-28T09:23:14,695 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for f65103e784e3773002ae1a64e3eece97 in 841ms, sequenceid=530, compaction requested=true 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:14,695 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store f65103e784e3773002ae1a64e3eece97:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:14,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:14,695 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:14,696 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:14,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:14,696 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/B is initiating minor compaction (all files) 2024-11-28T09:23:14,696 DEBUG 
[RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/A is initiating minor compaction (all files) 2024-11-28T09:23:14,696 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/B in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:14,696 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/A in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:14,696 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/961b4d3ca6e347c4b84b72dc73fbbf28, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.7 K 2024-11-28T09:23:14,696 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/34bc3048d6c0458a9b3bec56cee22f30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=39.1 K 2024-11-28T09:23:14,696 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 961b4d3ca6e347c4b84b72dc73fbbf28, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:14,696 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 34bc3048d6c0458a9b3bec56cee22f30, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:14,697 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 4b468de7330e4b358b8cc1e58c89f630, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732785792082 2024-11-28T09:23:14,697 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 307e3fd0e4fd47f7bb33004486c6e022, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732785792082 2024-11-28T09:23:14,697 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
f87f6658233d425ba19ad63f9689b8e0, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732785793224 2024-11-28T09:23:14,697 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b9c267e32e6a478488a646435715c60f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732785793224 2024-11-28T09:23:14,702 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#B#compaction#574 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:14,702 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#A#compaction#573 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:14,702 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/971f0154553d4779a2047a017c818e69 is 50, key is test_row_0/A:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:14,702 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/13cade4e5763453bb79eab527233e106 is 50, key is test_row_0/B:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:14,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742484_1660 (size=13085) 2024-11-28T09:23:14,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742485_1661 (size=13085) 2024-11-28T09:23:15,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:15,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:23:15,002 DEBUG [Thread-2395 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2b0c2472 to 127.0.0.1:53251 2024-11-28T09:23:15,002 DEBUG [Thread-2387 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x695c2253 to 127.0.0.1:53251 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:15,002 DEBUG [Thread-2395 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,002 DEBUG [Thread-2387 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; 
before=1, new segment=null 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:15,002 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:15,006 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a9d29a1b3e9a4c1cac94f26b3419569c is 50, key is test_row_0/A:col10/1732785793864/Put/seqid=0 2024-11-28T09:23:15,006 DEBUG [Thread-2393 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7043f683 to 127.0.0.1:53251 2024-11-28T09:23:15,007 DEBUG [Thread-2393 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,007 DEBUG [Thread-2389 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7177efc9 to 127.0.0.1:53251 2024-11-28T09:23:15,007 DEBUG [Thread-2389 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742486_1662 (size=12301) 2024-11-28T09:23:15,010 DEBUG [Thread-2391 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61d38088 to 127.0.0.1:53251 2024-11-28T09:23:15,010 DEBUG [Thread-2391 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 97 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 85 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 84 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-28T09:23:15,010 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1827 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5481 rows 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1825 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5475 rows 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1827 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5481 rows 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1836 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5508 rows 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1816 2024-11-28T09:23:15,011 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5448 rows 2024-11-28T09:23:15,011 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:23:15,011 DEBUG [Time-limited test {}] 
zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3512017b to 127.0.0.1:53251 2024-11-28T09:23:15,011 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:15,012 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-28T09:23:15,013 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees 2024-11-28T09:23:15,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:15,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:15,016 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785795016"}]},"ts":"1732785795016"} 2024-11-28T09:23:15,017 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:23:15,020 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:23:15,020 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:23:15,021 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, UNASSIGN}] 2024-11-28T09:23:15,022 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, UNASSIGN 2024-11-28T09:23:15,022 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f65103e784e3773002ae1a64e3eece97, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:15,023 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:23:15,023 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; CloseRegionProcedure f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:23:15,110 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/971f0154553d4779a2047a017c818e69 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/971f0154553d4779a2047a017c818e69 2024-11-28T09:23:15,110 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/13cade4e5763453bb79eab527233e106 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/13cade4e5763453bb79eab527233e106 2024-11-28T09:23:15,113 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/B of f65103e784e3773002ae1a64e3eece97 into 13cade4e5763453bb79eab527233e106(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:15,113 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/A of f65103e784e3773002ae1a64e3eece97 into 971f0154553d4779a2047a017c818e69(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:15,113 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/B, priority=13, startTime=1732785794695; duration=0sec 2024-11-28T09:23:15,113 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/A, priority=13, startTime=1732785794695; duration=0sec 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:B 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:A 2024-11-28T09:23:15,113 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:15,114 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:15,114 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): f65103e784e3773002ae1a64e3eece97/C is initiating minor compaction (all files) 
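The ExploringCompactionPolicy entries above show each store's three eligible HFiles being accepted as a single candidate window for a minor compaction. Below is a minimal sketch of the size-ratio test that this style of selection applies; the ratio value, class name, helper name, and file sizes are illustrative assumptions, not the actual HBase internals (the real policy also weighs file counts, total size limits, and multiple permutations).

```java
import java.util.List;

// Simplified "files in ratio" test, illustrative only: every candidate file must be
// no larger than RATIO times the combined size of the other candidates.
public final class CompactionRatioSketch {
  private static final double RATIO = 1.2; // assumed; not necessarily the configured HBase value

  static boolean filesInRatio(List<Long> fileSizes) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      if (size > (total - size) * RATIO) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three B-store files selected above: 12.7 K + 12.0 K + 12.0 K.
    System.out.println(filesInRatio(List.of(13_005L, 12_288L, 12_288L))); // true -> compactable window
  }
}
```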
2024-11-28T09:23:15,114 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of f65103e784e3773002ae1a64e3eece97/C in TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:15,114 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8e205dd3e91c4e7abe4c63ca53f185a7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp, totalSize=36.7 K 2024-11-28T09:23:15,115 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e205dd3e91c4e7abe4c63ca53f185a7, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=490, earliestPutTs=1732785790927 2024-11-28T09:23:15,115 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting c01a69e79a71436eb9a598de2d370c6b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=513, earliestPutTs=1732785792082 2024-11-28T09:23:15,115 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting b7881058214c4bf0a50df9e84991cefc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=530, earliestPutTs=1732785793224 2024-11-28T09:23:15,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:15,119 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): f65103e784e3773002ae1a64e3eece97#C#compaction#576 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:15,120 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/9fb8b8108441411ea17ea83c935562d4 is 50, key is test_row_0/C:col10/1732785793853/Put/seqid=0 2024-11-28T09:23:15,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742487_1663 (size=13085) 2024-11-28T09:23:15,125 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/9fb8b8108441411ea17ea83c935562d4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9fb8b8108441411ea17ea83c935562d4 2024-11-28T09:23:15,128 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in f65103e784e3773002ae1a64e3eece97/C of f65103e784e3773002ae1a64e3eece97 into 9fb8b8108441411ea17ea83c935562d4(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:15,128 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:15,128 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97., storeName=f65103e784e3773002ae1a64e3eece97/C, priority=13, startTime=1732785794695; duration=0sec 2024-11-28T09:23:15,128 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:15,128 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: f65103e784e3773002ae1a64e3eece97:C 2024-11-28T09:23:15,174 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:15,174 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(124): Close f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:15,174 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:23:15,175 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1681): Closing f65103e784e3773002ae1a64e3eece97, disabling compactions & flushes 2024-11-28T09:23:15,175 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1942): waiting for 0 compactions & cache flush to complete for region TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
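The UnassignRegionHandler entries above are the region being closed on behalf of the DisableTableProcedure (pid=150) stored a moment earlier. A minimal sketch of the client-side Admin calls that drive this kind of FLUSH/DISABLE activity follows; the ZooKeeper quorum setting is assumed to match the test cluster from the log, and everything else is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Illustrative sketch of the Admin calls behind the FLUSH (procId 148) and
// DISABLE (procId 150) procedures seen in this log.
public class FlushAndDisableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:53251"); // assumed to match the mini-cluster
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.flush(table);        // synchronous flush of the table's regions
      admin.disableTable(table); // kicks off DisableTableProcedure -> CloseRegionProcedure
    }
  }
}
```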
2024-11-28T09:23:15,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:15,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a9d29a1b3e9a4c1cac94f26b3419569c 2024-11-28T09:23:15,416 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/5d28ab09f2544ac1b4d3d936f3e1a35b is 50, key is test_row_0/B:col10/1732785793864/Put/seqid=0 2024-11-28T09:23:15,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742488_1664 (size=12301) 2024-11-28T09:23:15,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:15,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/5d28ab09f2544ac1b4d3d936f3e1a35b 2024-11-28T09:23:15,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/60c27b42896d4852b631d6c50261217a is 50, key is test_row_0/C:col10/1732785793864/Put/seqid=0 2024-11-28T09:23:15,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742489_1665 (size=12301) 2024-11-28T09:23:16,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:16,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=554 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/60c27b42896d4852b631d6c50261217a 2024-11-28T09:23:16,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/a9d29a1b3e9a4c1cac94f26b3419569c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a9d29a1b3e9a4c1cac94f26b3419569c 2024-11-28T09:23:16,235 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a9d29a1b3e9a4c1cac94f26b3419569c, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:23:16,235 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/5d28ab09f2544ac1b4d3d936f3e1a35b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5d28ab09f2544ac1b4d3d936f3e1a35b 2024-11-28T09:23:16,237 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5d28ab09f2544ac1b4d3d936f3e1a35b, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:23:16,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/60c27b42896d4852b631d6c50261217a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/60c27b42896d4852b631d6c50261217a 2024-11-28T09:23:16,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/60c27b42896d4852b631d6c50261217a, entries=150, sequenceid=554, filesize=12.0 K 2024-11-28T09:23:16,240 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=20.13 KB/20610 for f65103e784e3773002ae1a64e3eece97 in 1238ms, sequenceid=554, compaction requested=false 2024-11-28T09:23:16,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:16,241 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. after waiting 0 ms 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
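Once a flush like the one just logged completes and drains the memstore below its blocking limit, writers that were being rejected with RegionTooBusyException can succeed again. The stock HBase client retries these rejections internally (governed by hbase.client.retries.number and hbase.client.pause); the sketch below only illustrates what an explicit retry-with-backoff would look like, with the attempt count and pauses as assumptions.

```java
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Illustrative retry-with-backoff around a put rejected while the memstore is
// over its blocking limit. Not the client's actual retry machinery.
public final class TooBusyRetrySketch {
  static void putWithBackoff(Table table, Put put) throws Exception {
    long pauseMs = 100; // assumed starting pause
    for (int attempt = 0; attempt < 10; attempt++) {
      try {
        table.put(put);
        return;
      } catch (RegionTooBusyException busy) {
        Thread.sleep(pauseMs);                  // give the flush time to drain the memstore
        pauseMs = Math.min(pauseMs * 2, 5_000); // exponential backoff, capped
      }
    }
    throw new RuntimeException("region still too busy after retries");
  }
}
```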
2024-11-28T09:23:16,241 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(2837): Flushing f65103e784e3773002ae1a64e3eece97 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=A 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=B 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactingMemStore(205): FLUSHING TO DISK f65103e784e3773002ae1a64e3eece97, store=C 2024-11-28T09:23:16,241 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:16,244 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/dba28a8abb264bb5a411e6a0008a7d29 is 50, key is test_row_0/A:col10/1732785795006/Put/seqid=0 2024-11-28T09:23:16,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742490_1666 (size=9857) 2024-11-28T09:23:16,647 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/dba28a8abb264bb5a411e6a0008a7d29 2024-11-28T09:23:16,652 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ae44534d046c41e4b44d8d1961868431 is 50, key is test_row_0/B:col10/1732785795006/Put/seqid=0 2024-11-28T09:23:16,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742491_1667 (size=9857) 2024-11-28T09:23:17,057 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ae44534d046c41e4b44d8d1961868431 
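For context on the 512.0 K figure in the RegionTooBusyException messages earlier in this section: HRegion.checkResources blocks updates once the region's memstore exceeds hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The tuned values in the sketch below are assumptions chosen to reproduce the 512 K limit seen here (the usual defaults are 128 MB and 4). Blocking writes at that multiple of the flush size is what lets flushes like the one above catch up instead of letting the memstore grow without bound.

```java
// Illustrative arithmetic only; the exact test configuration is an assumption.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    long flushSize = 128L * 1024;                  // assumed hbase.hregion.memstore.flush.size
    long blockMultiplier = 4L;                     // assumed hbase.hregion.memstore.block.multiplier
    long blockingLimit = flushSize * blockMultiplier;
    System.out.println(blockingLimit + " bytes");  // 524288 bytes = 512.0 K, matching the log
  }
}
```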
2024-11-28T09:23:17,064 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0299974489ff4a2bbb86f8da1f060cd2 is 50, key is test_row_0/C:col10/1732785795006/Put/seqid=0 2024-11-28T09:23:17,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742492_1668 (size=9857) 2024-11-28T09:23:17,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:17,467 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=563 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0299974489ff4a2bbb86f8da1f060cd2 2024-11-28T09:23:17,471 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/A/dba28a8abb264bb5a411e6a0008a7d29 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/dba28a8abb264bb5a411e6a0008a7d29 2024-11-28T09:23:17,473 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/dba28a8abb264bb5a411e6a0008a7d29, entries=100, sequenceid=563, filesize=9.6 K 2024-11-28T09:23:17,474 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/B/ae44534d046c41e4b44d8d1961868431 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ae44534d046c41e4b44d8d1961868431 2024-11-28T09:23:17,476 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ae44534d046c41e4b44d8d1961868431, entries=100, sequenceid=563, filesize=9.6 K 2024-11-28T09:23:17,477 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/.tmp/C/0299974489ff4a2bbb86f8da1f060cd2 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0299974489ff4a2bbb86f8da1f060cd2 2024-11-28T09:23:17,479 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0299974489ff4a2bbb86f8da1f060cd2, entries=100, sequenceid=563, filesize=9.6 K 2024-11-28T09:23:17,480 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for f65103e784e3773002ae1a64e3eece97 in 1239ms, sequenceid=563, compaction requested=true 2024-11-28T09:23:17,480 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b81390e8b207424fa25563d1ede778dc, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/5bde39d17e5c4097ad3bc6df020cbe38, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/52120116b66b412a8e1f324035f533b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/6e74cd22152d4846808ad8c8e9bca386, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0349015aea3f437085bdf928236c35af, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/d6f3623b4f5c462fb24184f943512169, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/34bc3048d6c0458a9b3bec56cee22f30, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0] to archive 2024-11-28T09:23:17,481 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:17,482 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b81390e8b207424fa25563d1ede778dc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b81390e8b207424fa25563d1ede778dc 2024-11-28T09:23:17,483 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0bc6485396004155a04fe85a54d5b2b9 2024-11-28T09:23:17,484 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/542eb56564074c56a77572ecf93a709f 2024-11-28T09:23:17,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/922331aacb8946d0982490d7d2fae8f4 2024-11-28T09:23:17,485 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/5bde39d17e5c4097ad3bc6df020cbe38 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/5bde39d17e5c4097ad3bc6df020cbe38 2024-11-28T09:23:17,486 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/bfbb650d243e437bb89819c5e1ec424a 2024-11-28T09:23:17,487 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4951e0d77b954c8d8fa126fb096230c7 2024-11-28T09:23:17,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/52120116b66b412a8e1f324035f533b0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/52120116b66b412a8e1f324035f533b0 2024-11-28T09:23:17,488 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/cf57bbf77bef4d9e862e01ac8cc65946 2024-11-28T09:23:17,489 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/6e74cd22152d4846808ad8c8e9bca386 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/6e74cd22152d4846808ad8c8e9bca386 2024-11-28T09:23:17,490 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/b5bff66c375546cfb705a78ffa41d47b 2024-11-28T09:23:17,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/4dc2f6d01d4b40f1bfa650ee19fd4b96 2024-11-28T09:23:17,491 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/45e7466d0d5e46c49a29c49d448eccef 2024-11-28T09:23:17,492 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f0d62d02a2cf4b519c8e4b77dde2224f 2024-11-28T09:23:17,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0349015aea3f437085bdf928236c35af to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/0349015aea3f437085bdf928236c35af 2024-11-28T09:23:17,493 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/09f36efd477f4bb19d01c151b64b8697 2024-11-28T09:23:17,494 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/94172ab90e464842b4e66965c91126a0 2024-11-28T09:23:17,495 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/d6f3623b4f5c462fb24184f943512169 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/d6f3623b4f5c462fb24184f943512169 2024-11-28T09:23:17,496 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/011bcbd2021d481ead85eb47f1c6a848 2024-11-28T09:23:17,496 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/278d7916f51e4b4e8c9fa96d80a4767a 2024-11-28T09:23:17,497 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/34bc3048d6c0458a9b3bec56cee22f30 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/34bc3048d6c0458a9b3bec56cee22f30 2024-11-28T09:23:17,498 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/307e3fd0e4fd47f7bb33004486c6e022 2024-11-28T09:23:17,499 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/f87f6658233d425ba19ad63f9689b8e0 2024-11-28T09:23:17,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fbf4858f89d84e2dbcf641c4ec3f9d43, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d6bbfa90606a4a638b48999b3d04ae85, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ac07082e69044f14a4a03b255b8ee45f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d3f3218da9a64087a946039d4595a591, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4e2b3afd1bc14e30bb8c52a62e427c70, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/6e2a5dc463f54fde98b8f852dd37976d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/961b4d3ca6e347c4b84b72dc73fbbf28, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f] to archive 2024-11-28T09:23:17,500 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:17,501 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fbf4858f89d84e2dbcf641c4ec3f9d43 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fbf4858f89d84e2dbcf641c4ec3f9d43 2024-11-28T09:23:17,502 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/1389b470ec224e2990d9caeead4edbe7 2024-11-28T09:23:17,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/f145e16be63d43229ef675048f24edef 2024-11-28T09:23:17,503 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d6bbfa90606a4a638b48999b3d04ae85 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d6bbfa90606a4a638b48999b3d04ae85 2024-11-28T09:23:17,504 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/438a57fa1cf343dd94f3333293f30344 2024-11-28T09:23:17,505 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fd6e737f70c748ae82a2db100e09e7a9 2024-11-28T09:23:17,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ac07082e69044f14a4a03b255b8ee45f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ac07082e69044f14a4a03b255b8ee45f 2024-11-28T09:23:17,506 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/374d55a223dc4bf9bfec023eaa5ba774 2024-11-28T09:23:17,507 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/31b17153129c4c83a4d86af159117eb0 2024-11-28T09:23:17,508 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d3f3218da9a64087a946039d4595a591 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d3f3218da9a64087a946039d4595a591 2024-11-28T09:23:17,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/fae7f0bb079d45888d7c68d992db8886 2024-11-28T09:23:17,509 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/7b73f99909dd47ac9ef20ac9ebbfe486 2024-11-28T09:23:17,510 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/89f44da0a96f466db5062c91f41b704a 2024-11-28T09:23:17,511 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4e2b3afd1bc14e30bb8c52a62e427c70 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4e2b3afd1bc14e30bb8c52a62e427c70 2024-11-28T09:23:17,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/397e4cca9f8f4dc2aa722d7fe8a632db 2024-11-28T09:23:17,512 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ec147b41ab1a4141a596497dd39f2822 2024-11-28T09:23:17,513 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/6e2a5dc463f54fde98b8f852dd37976d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/6e2a5dc463f54fde98b8f852dd37976d 2024-11-28T09:23:17,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/d1cf62a1071348f5acd1b1c00e2dc283 2024-11-28T09:23:17,514 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/cacfb8cc17af46d7b6183cc64ec30507 2024-11-28T09:23:17,515 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/961b4d3ca6e347c4b84b72dc73fbbf28 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/961b4d3ca6e347c4b84b72dc73fbbf28 2024-11-28T09:23:17,516 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/289dede91d934803b75a2809ec422fdb 2024-11-28T09:23:17,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/4b468de7330e4b358b8cc1e58c89f630 2024-11-28T09:23:17,517 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/b9c267e32e6a478488a646435715c60f 2024-11-28T09:23:17,518 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b255cf01c29d44b7a705911ced76e1b5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a05798753a844375a95095b2964319c4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/4fff88b6220c4afe8a8f25d1cadf09a6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/35d1c41318f745178ee6ad36c7dd623c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/696bae8146624d14aed0d187659edbd4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9a95772817e44005902d815a0df6bee6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8e205dd3e91c4e7abe4c63ca53f185a7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc] to archive 2024-11-28T09:23:17,519 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:17,520 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b255cf01c29d44b7a705911ced76e1b5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b255cf01c29d44b7a705911ced76e1b5 2024-11-28T09:23:17,521 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/db749e7d99b348c88ce94b7665b5b7a7 2024-11-28T09:23:17,522 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0489161974e74f5ebeeda2f4bb28dbfd 2024-11-28T09:23:17,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a05798753a844375a95095b2964319c4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/a05798753a844375a95095b2964319c4 2024-11-28T09:23:17,523 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/e3549aff3e6d4ab1b4adac4717a5ccaf 2024-11-28T09:23:17,524 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/32e975d64e7c4ba89e1355bb062b8753 2024-11-28T09:23:17,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/4fff88b6220c4afe8a8f25d1cadf09a6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/4fff88b6220c4afe8a8f25d1cadf09a6 2024-11-28T09:23:17,525 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c7e3f0fe881a45dd95fc1efeb2bc811a 2024-11-28T09:23:17,526 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/6535ee443abd4e1ebe0135ef0c9f09bb 2024-11-28T09:23:17,527 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/35d1c41318f745178ee6ad36c7dd623c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/35d1c41318f745178ee6ad36c7dd623c 2024-11-28T09:23:17,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/52d400e2eef04667ac542eef2475e13d 2024-11-28T09:23:17,528 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/d6d5b143eb0145d1893d8635d1b8618f 2024-11-28T09:23:17,529 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/fd1869e84bf9410a88e43f46b2ffbab3 2024-11-28T09:23:17,530 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/696bae8146624d14aed0d187659edbd4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/696bae8146624d14aed0d187659edbd4 2024-11-28T09:23:17,531 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/bcf4dc8f55b94b7797702a2220e8410e 2024-11-28T09:23:17,532 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/2727b0d37d3d4526a2c076fd5a39268d 2024-11-28T09:23:17,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9a95772817e44005902d815a0df6bee6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9a95772817e44005902d815a0df6bee6 2024-11-28T09:23:17,533 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/7cb61df0f6494b868822cd1b2f7e6082 2024-11-28T09:23:17,534 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/ead684eb47a64015aaab12cb32275570 2024-11-28T09:23:17,535 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8e205dd3e91c4e7abe4c63ca53f185a7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8e205dd3e91c4e7abe4c63ca53f185a7 2024-11-28T09:23:17,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/8f190a55fcff4558a555cdcec7546a97 2024-11-28T09:23:17,536 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/c01a69e79a71436eb9a598de2d370c6b 2024-11-28T09:23:17,537 DEBUG [StoreCloser-TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/b7881058214c4bf0a50df9e84991cefc 2024-11-28T09:23:17,541 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/recovered.edits/566.seqid, newMaxSeqId=566, maxSeqId=1 2024-11-28T09:23:17,541 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97. 
2024-11-28T09:23:17,541 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] regionserver.HRegion(1635): Region close journal for f65103e784e3773002ae1a64e3eece97: 2024-11-28T09:23:17,543 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=153}] handler.UnassignRegionHandler(170): Closed f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:17,543 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=f65103e784e3773002ae1a64e3eece97, regionState=CLOSED 2024-11-28T09:23:17,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-28T09:23:17,545 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; CloseRegionProcedure f65103e784e3773002ae1a64e3eece97, server=363d8d38a970,33819,1732785660637 in 2.5210 sec 2024-11-28T09:23:17,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-28T09:23:17,546 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=f65103e784e3773002ae1a64e3eece97, UNASSIGN in 2.5240 sec 2024-11-28T09:23:17,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=150 2024-11-28T09:23:17,547 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=150, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.5260 sec 2024-11-28T09:23:17,548 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785797548"}]},"ts":"1732785797548"} 2024-11-28T09:23:17,549 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:23:17,550 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:23:17,551 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.5380 sec 2024-11-28T09:23:19,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-28T09:23:19,121 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-28T09:23:19,121 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:23:19,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,123 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=154, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T09:23:19,123 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from 
filesystem for pid=154, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,125 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:19,127 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/recovered.edits] 2024-11-28T09:23:19,129 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/971f0154553d4779a2047a017c818e69 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/971f0154553d4779a2047a017c818e69 2024-11-28T09:23:19,130 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a9d29a1b3e9a4c1cac94f26b3419569c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/a9d29a1b3e9a4c1cac94f26b3419569c 2024-11-28T09:23:19,130 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/dba28a8abb264bb5a411e6a0008a7d29 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/A/dba28a8abb264bb5a411e6a0008a7d29 2024-11-28T09:23:19,132 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/13cade4e5763453bb79eab527233e106 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/13cade4e5763453bb79eab527233e106 2024-11-28T09:23:19,133 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5d28ab09f2544ac1b4d3d936f3e1a35b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/5d28ab09f2544ac1b4d3d936f3e1a35b 
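The HFileArchiver entries above show each store file of region f65103e784e3773002ae1a64e3eece97 being moved from the table's data directory into the parallel archive directory rather than deleted outright. The short sketch below is one way to inspect what ended up under that archive directory with the Hadoop FileSystem API; the NameNode address and archive path are copied from the log, while the class name and everything else are illustrative assumptions, not part of the test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedHFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode address as it appears in the log; adjust for a real cluster.
    conf.set("fs.defaultFS", "hdfs://localhost:33549");

    // Archive directory that HFileArchiver populated for the deleted region.
    Path archivedRegion = new Path(
        "/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/"
        + "archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97");

    try (FileSystem fs = FileSystem.get(conf)) {
      // Recursively list every archived file (families A, B, C and recovered.edits).
      RemoteIterator<LocatedFileStatus> it = fs.listFiles(archivedRegion, true);
      while (it.hasNext()) {
        LocatedFileStatus status = it.next();
        System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
      }
    }
  }
}
```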
2024-11-28T09:23:19,133 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ae44534d046c41e4b44d8d1961868431 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/B/ae44534d046c41e4b44d8d1961868431 2024-11-28T09:23:19,135 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0299974489ff4a2bbb86f8da1f060cd2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/0299974489ff4a2bbb86f8da1f060cd2 2024-11-28T09:23:19,136 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/60c27b42896d4852b631d6c50261217a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/60c27b42896d4852b631d6c50261217a 2024-11-28T09:23:19,136 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9fb8b8108441411ea17ea83c935562d4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/C/9fb8b8108441411ea17ea83c935562d4 2024-11-28T09:23:19,138 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/recovered.edits/566.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97/recovered.edits/566.seqid 2024-11-28T09:23:19,139 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/f65103e784e3773002ae1a64e3eece97 2024-11-28T09:23:19,139 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:23:19,141 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=154, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,142 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:23:19,143 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
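The DisableTableProcedure and DeleteTableProcedure pair driving the entries above corresponds to the client-side DISABLE and DELETE operations recorded for procId 150 and 154. A minimal client-side sketch of those calls is shown below; the table name comes from the log, while the connection setup and class name are assumptions for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // A table must be disabled before it can be deleted; these two calls are
      // what produce the DisableTableProcedure / DeleteTableProcedure pair above.
      if (admin.tableExists(table)) {
        if (!admin.isTableDisabled(table)) {
          admin.disableTable(table);
        }
        admin.deleteTable(table);
      }
    }
  }
}
```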
2024-11-28T09:23:19,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=154, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,144 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:23:19,144 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785799144"}]},"ts":"9223372036854775807"} 2024-11-28T09:23:19,145 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:23:19,145 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f65103e784e3773002ae1a64e3eece97, NAME => 'TestAcidGuarantees,,1732785773049.f65103e784e3773002ae1a64e3eece97.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:23:19,145 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T09:23:19,146 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785799145"}]},"ts":"9223372036854775807"} 2024-11-28T09:23:19,147 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:23:19,148 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=154, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,149 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 27 msec 2024-11-28T09:23:19,224 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-28T09:23:19,224 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 154 completed 2024-11-28T09:23:19,233 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=241 (was 241), OpenFileDescriptor=458 (was 460), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=685 (was 689), ProcessCount=11 (was 11), AvailableMemoryMB=4168 (was 4212) 2024-11-28T09:23:19,241 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=241, OpenFileDescriptor=458, MaxFileDescriptor=1048576, SystemLoadAverage=685, ProcessCount=11, AvailableMemoryMB=4168 2024-11-28T09:23:19,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
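The TableDescriptorChecker warning above flags the test's per-table MEMSTORE_FLUSHSIZE of 131072 bytes (128 KB) as very small next to the usual 128 MB default for hbase.hregion.memstore.flush.size; the tiny value is deliberate here to force frequent flushes. A minimal sketch of how such a per-table flush size is declared follows; the flush-size numbers mirror the warning, and the class name and family choice are illustrative assumptions.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushSizeExample {
  public static void main(String[] args) {
    // 128 KB, the value the TableDescriptorChecker warns about in the log.
    long testFlushSize = 128L * 1024;
    // 128 MB, the usual hbase.hregion.memstore.flush.size default.
    long defaultFlushSize = 128L * 1024 * 1024;

    TableDescriptor descriptor = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestAcidGuarantees"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("A"))
        // Per-table MEMSTORE_FLUSHSIZE; tiny values force very frequent flushes,
        // useful for this test but undesirable in production.
        .setMemStoreFlushSize(testFlushSize)
        .build();

    System.out.println("configured=" + descriptor.getMemStoreFlushSize()
        + " default=" + defaultFlushSize);
  }
}
```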
2024-11-28T09:23:19,243 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:23:19,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:19,245 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T09:23:19,245 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:19,245 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 155 2024-11-28T09:23:19,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-28T09:23:19,245 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T09:23:19,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742493_1669 (size=960) 2024-11-28T09:23:19,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-28T09:23:19,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-28T09:23:19,655 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532 2024-11-28T09:23:19,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742494_1670 (size=53) 2024-11-28T09:23:19,745 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:23:19,745 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 59fa39bbf7af22b65ee5a2abca8580d1, disabling compactions & flushes 2024-11-28T09:23:19,745 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:19,747 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:19,747 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. after waiting 0 ms 2024-11-28T09:23:19,747 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:19,747 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:19,747 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:19,748 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T09:23:19,748 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1732785799748"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732785799748"}]},"ts":"1732785799748"} 2024-11-28T09:23:19,749 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
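The CreateTableProcedure above builds the table from the descriptor dumped in the create request: three column families A, B and C with a single version each, plus the table-level metadata attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A rough client-side equivalent of that request is sketched below; the table name, family names, version count and metadata key come from the log, while the connection setup and class name are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          // Table-level metadata attribute shown in the logged create request.
          .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
      // Three column families A, B, C, each keeping a single version,
      // matching the descriptor dumped by HMaster above.
      for (String family : new String[] { "A", "B", "C" }) {
        builder.setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes(family))
            .setMaxVersions(1)
            .build());
      }
      admin.createTable(builder.build());
    }
  }
}
```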
2024-11-28T09:23:19,750 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T09:23:19,750 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785799750"}]},"ts":"1732785799750"} 2024-11-28T09:23:19,751 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-28T09:23:19,766 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, ASSIGN}] 2024-11-28T09:23:19,780 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, ASSIGN 2024-11-28T09:23:19,781 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, ASSIGN; state=OFFLINE, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=false 2024-11-28T09:23:19,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-28T09:23:19,931 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:19,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; OpenRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:23:20,085 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,088 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
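The assignment entries above end with region 59fa39bbf7af22b65ee5a2abca8580d1 being opened on 363d8d38a970,33819,1732785660637. From the client side, a RegionLocator is one way to observe where that single region landed, as in the sketch below; the table name and the expected location come from the log, and the rest is illustrative.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         RegionLocator locator =
             connection.getRegionLocator(TableName.valueOf("TestAcidGuarantees"))) {
      // For this single-region table the log shows one location: encoded name
      // 59fa39bbf7af22b65ee5a2abca8580d1 on 363d8d38a970,33819,1732785660637.
      for (HRegionLocation location : locator.getAllRegionLocations()) {
        System.out.println(location.getRegion().getEncodedName()
            + " -> " + location.getServerName());
      }
    }
  }
}
```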
2024-11-28T09:23:20,088 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7285): Opening region: {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:23:20,088 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,088 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:23:20,089 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7327): checking encryption for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,089 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(7330): checking classloading for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,090 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,091 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,091 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName A 2024-11-28T09:23:20,092 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,092 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,092 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,093 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,093 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName B 2024-11-28T09:23:20,093 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,094 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,094 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,095 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,095 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName C 2024-11-28T09:23:20,095 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,095 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,095 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,096 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,096 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,098 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:23:20,099 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1085): writing seq id for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,101 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T09:23:20,101 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1102): Opened 59fa39bbf7af22b65ee5a2abca8580d1; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74571674, jitterRate=0.11120453476905823}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:23:20,102 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegion(1001): Region open journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:20,103 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., pid=157, masterSystemTime=1732785800085 2024-11-28T09:23:20,104 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,104 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=157}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
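Each store in the region open above is backed by a CompactingMemStore with compactor=BASIC, which is what the table-level 'hbase.hregion.compacting.memstore.type' => 'BASIC' attribute requests: in-memory segments are flattened and pipelined before anything reaches disk. HBase also exposes the same policy per column family, as in the sketch below; the BASIC policy and family name A mirror the log, while the use of the per-family setter rather than the table attribute is an illustrative alternative, not what this test does.

```java
import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class BasicInMemoryCompaction {
  public static void main(String[] args) {
    // Per-family equivalent of the table-level compacting-memstore attribute:
    // the store is opened as a CompactingMemStore with the BASIC policy.
    ColumnFamilyDescriptor family = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
    System.out.println(family.getInMemoryCompaction());
  }
}
```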
2024-11-28T09:23:20,104 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=OPEN, openSeqNum=2, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-28T09:23:20,107 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; OpenRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 in 172 msec 2024-11-28T09:23:20,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-28T09:23:20,108 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, ASSIGN in 341 msec 2024-11-28T09:23:20,109 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T09:23:20,109 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785800109"}]},"ts":"1732785800109"} 2024-11-28T09:23:20,119 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-28T09:23:20,125 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=155, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T09:23:20,126 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 882 msec 2024-11-28T09:23:20,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-11-28T09:23:20,349 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 155 completed 2024-11-28T09:23:20,351 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17e5a47d to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2cbfd84f 2024-11-28T09:23:20,362 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2209c520, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,364 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:20,366 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38656, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:20,367 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T09:23:20,368 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59970, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T09:23:20,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-28T09:23:20,370 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.2 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T09:23:20,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=158, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:20,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742495_1671 (size=996) 2024-11-28T09:23:20,383 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-28T09:23:20,383 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-28T09:23:20,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:23:20,387 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, REOPEN/MOVE}] 2024-11-28T09:23:20,388 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, REOPEN/MOVE 2024-11-28T09:23:20,389 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,390 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:23:20,390 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE; CloseRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:23:20,541 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,542 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(124): Close 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,542 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:23:20,542 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1681): Closing 59fa39bbf7af22b65ee5a2abca8580d1, disabling compactions & flushes 2024-11-28T09:23:20,542 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,542 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,542 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. after waiting 0 ms 2024-11-28T09:23:20,542 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
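The ModifyTableProcedure above applies the descriptor change requested earlier: family A gains IS_MOB => 'true' and MOB_THRESHOLD => '4', after which the table's region is reopened to pick up the new definition. The sketch below shows one way a client could make that change; the table name, family name and MOB values come from the log, while using modifyColumnFamily (rather than submitting the full modified table descriptor, as the test harness does) and the class name are illustrative assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class EnableMobOnFamilyA {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Start from the existing definition of family A and turn on MOB with a
      // 4-byte threshold, the values shown in the modify request above.
      ColumnFamilyDescriptor current =
          admin.getDescriptor(table).getColumnFamily(Bytes.toBytes("A"));
      ColumnFamilyDescriptor mobEnabled = ColumnFamilyDescriptorBuilder
          .newBuilder(current)
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(4L)   // MOB_THRESHOLD => '4'
          .build();
      // Triggers a modify procedure and a reopen of the table's regions,
      // as the surrounding log entries show.
      admin.modifyColumnFamily(table, mobEnabled);
    }
  }
}
```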
2024-11-28T09:23:20,546 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-28T09:23:20,547 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,547 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegion(1635): Region close journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:20,547 WARN [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] regionserver.HRegionServer(3786): Not adding moved region record: 59fa39bbf7af22b65ee5a2abca8580d1 to self. 2024-11-28T09:23:20,548 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=161}] handler.UnassignRegionHandler(170): Closed 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,550 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=CLOSED 2024-11-28T09:23:20,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=161, resume processing ppid=160 2024-11-28T09:23:20,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, ppid=160, state=SUCCESS; CloseRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 in 161 msec 2024-11-28T09:23:20,553 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=160, ppid=159, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, REOPEN/MOVE; state=CLOSED, location=363d8d38a970,33819,1732785660637; forceNewPlan=false, retain=true 2024-11-28T09:23:20,704 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=OPENING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=160, state=RUNNABLE; OpenRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:23:20,857 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,860 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:20,860 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7285): Opening region: {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} 2024-11-28T09:23:20,861 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,861 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T09:23:20,861 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7327): checking encryption for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,861 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(7330): checking classloading for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,862 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,863 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,863 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName A 2024-11-28T09:23:20,864 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,865 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,865 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,866 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,866 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName B 2024-11-28T09:23:20,866 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,866 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,866 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,867 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-28T09:23:20,867 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 59fa39bbf7af22b65ee5a2abca8580d1 columnFamilyName C 2024-11-28T09:23:20,867 DEBUG [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:20,867 INFO [StoreOpener-59fa39bbf7af22b65ee5a2abca8580d1-1 {}] regionserver.HStore(327): Store=59fa39bbf7af22b65ee5a2abca8580d1/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T09:23:20,868 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,868 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,869 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,871 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-28T09:23:20,874 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1085): writing seq id for 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:20,876 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1102): Opened 59fa39bbf7af22b65ee5a2abca8580d1; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59009988, jitterRate=-0.12068265676498413}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-28T09:23:20,877 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegion(1001): Region open journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:20,878 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., pid=162, masterSystemTime=1732785800857 2024-11-28T09:23:20,879 DEBUG [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:20,879 INFO [RS_OPEN_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_OPEN_REGION, pid=162}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
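Once the region is reopened with the MOB-enabled descriptor, the entries further down show the test client issuing "flush TestAcidGuarantees" (FlushTableProcedure pid=163) and, a little later, the region server pushing back with RegionTooBusyException when the memstore exceeds its blocking limit; writers normally just see this as extra retries. The minimal sketch below shows the administrative flush call behind such a request; the table name matches the log, while the connection setup and class name are assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks the master to flush every region of the table; on the master this
      // surfaces as a FlushTableProcedure like pid=163 further down in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```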
2024-11-28T09:23:20,879 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=160 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=OPEN, openSeqNum=5, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:20,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=160 2024-11-28T09:23:20,882 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=160, state=SUCCESS; OpenRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 in 174 msec 2024-11-28T09:23:20,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-28T09:23:20,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, REOPEN/MOVE in 495 msec 2024-11-28T09:23:20,886 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-11-28T09:23:20,886 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 499 msec 2024-11-28T09:23:20,887 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 516 msec 2024-11-28T09:23:20,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=158 2024-11-28T09:23:20,890 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6d9954b7 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3fb684eb 2024-11-28T09:23:20,893 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537a66f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,894 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0d5efb7a to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@644b7e6 2024-11-28T09:23:20,929 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6094c70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,930 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc332d8 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5c9b5141 2024-11-28T09:23:20,937 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@103dfc6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,938 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x17327621 
to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11a52cdf 2024-11-28T09:23:20,941 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e047c09, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,942 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1584f18a to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2d7fe431 2024-11-28T09:23:20,948 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60d631a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,949 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x091d72db to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-11-28T09:23:20,952 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,953 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5d836f78 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d7fe93b 2024-11-28T09:23:20,959 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7846cb78, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,960 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x53305d9b to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@11c440f7 2024-11-28T09:23:20,971 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f1754bc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,972 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6bb6288a to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58460ef3 2024-11-28T09:23:20,986 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d9113f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:20,987 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06556601 to 127.0.0.1:53251 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e8cd1ae 2024-11-28T09:23:20,999 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bb75907, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T09:23:21,020 DEBUG [hconnection-0x4e3652f2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,020 DEBUG [hconnection-0x60790c64-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,021 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38668, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,023 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38676, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,032 DEBUG [hconnection-0xef453e4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,034 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38682, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,042 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:21,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-28T09:23:21,044 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:21,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:21,045 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:21,045 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:21,060 DEBUG [hconnection-0x5c0b7822-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,062 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38698, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,105 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 
59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:21,105 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:21,118 DEBUG [hconnection-0x3e4b2f46-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,120 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38710, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,135 DEBUG [hconnection-0x48828061-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,136 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38714, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785861139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785861139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785861138, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:21,148 DEBUG [hconnection-0x37bfc1d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,149 DEBUG [hconnection-0x67d9ce34-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,150 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38722, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,154 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785861153, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,155 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,158 DEBUG [hconnection-0x7754699a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,159 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,161 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785861160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,181 DEBUG [hconnection-0x3c82f0bd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T09:23:21,182 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38734, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T09:23:21,197 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,197 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:21,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,210 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282c7aa5b0711240f98504ecf8747ca936_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785801103/Put/seqid=0 2024-11-28T09:23:21,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785861244, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785861249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785861249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742496_1672 (size=12154) 2024-11-28T09:23:21,257 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:21,259 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785861255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,263 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785861262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,268 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411282c7aa5b0711240f98504ecf8747ca936_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282c7aa5b0711240f98504ecf8747ca936_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:21,269 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/53474051c55544009d772b9412fbe473, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:21,269 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/53474051c55544009d772b9412fbe473 is 175, key is test_row_0/A:col10/1732785801103/Put/seqid=0 2024-11-28T09:23:21,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742497_1673 (size=30955) 2024-11-28T09:23:21,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:21,350 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,351 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,452 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785861449, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,456 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785861454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,456 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785861453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785861460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,469 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
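The WARN above, together with the matching DEBUG CallRunner entries, shows HRegion.checkResources rejecting Mutate calls while the region's memstore is over its 512.0 K blocking limit; the exception detail and stack trace for that event continue immediately below. As a minimal client-side sketch of how a writer could react to that signal, the snippet here retries a single Put with a short backoff. The table, row, and column names are taken from the surrounding log; the retry budget and sleep interval are illustrative assumptions, and depending on client retry settings the busy condition may surface wrapped in a retries-exhausted exception rather than as RegionTooBusyException directly.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      for (int attempt = 1; attempt <= 5; attempt++) {   // retry budget: illustrative assumption
        try {
          table.put(put);                                // the Mutate RPC seen in the log
          break;                                         // accepted once the flush drains the memstore
        } catch (RegionTooBusyException busy) {
          // Same condition as the WARN above: memstore over its blocking limit.
          Thread.sleep(100L * attempt);                  // simple linear backoff (assumed value)
        }
      }
    }
  }
}

In general HBase terms, the blocking limit that triggers these rejections is the per-region flush size (hbase.hregion.memstore.flush.size) multiplied by hbase.hregion.memstore.block.multiplier; this test appears to run with deliberately small values so that writers hit the 512.0 K limit while the flush of 59fa39bbf7af22b65ee5a2abca8580d1 is still in progress.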
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785861468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,503 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,504 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,504 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,504 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:21,656 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,660 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:21,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,660 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,691 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=16, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/53474051c55544009d772b9412fbe473 2024-11-28T09:23:21,741 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2e4841ab3b02425cbbb50d1feffd4a96 is 50, key is test_row_0/B:col10/1732785801103/Put/seqid=0 2024-11-28T09:23:21,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785861768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785861769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,772 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785861769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,773 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785861768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:21,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785861772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742498_1674 (size=12001) 2024-11-28T09:23:21,791 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2e4841ab3b02425cbbb50d1feffd4a96 2024-11-28T09:23:21,813 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,813 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:21,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:21,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:21,813 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:21,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a66bf0d88eb84a099073de78f0a9a4b1 is 50, key is test_row_0/C:col10/1732785801103/Put/seqid=0 2024-11-28T09:23:21,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742499_1675 (size=12001) 2024-11-28T09:23:21,855 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=16 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a66bf0d88eb84a099073de78f0a9a4b1 2024-11-28T09:23:21,862 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/53474051c55544009d772b9412fbe473 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473 2024-11-28T09:23:21,868 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473, entries=150, sequenceid=16, filesize=30.2 K 2024-11-28T09:23:21,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2e4841ab3b02425cbbb50d1feffd4a96 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96 2024-11-28T09:23:21,885 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96, entries=150, sequenceid=16, filesize=11.7 K 2024-11-28T09:23:21,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a66bf0d88eb84a099073de78f0a9a4b1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1 2024-11-28T09:23:21,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1, entries=150, sequenceid=16, filesize=11.7 K 2024-11-28T09:23:21,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 59fa39bbf7af22b65ee5a2abca8580d1 in 792ms, sequenceid=16, compaction requested=false 2024-11-28T09:23:21,897 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-28T09:23:21,898 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:21,967 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:21,967 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:21,968 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:21,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:21,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128223391f33b864c2b839bbd459bc20655_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785801133/Put/seqid=0 2024-11-28T09:23:22,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742500_1676 (size=12154) 2024-11-28T09:23:22,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:22,053 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128223391f33b864c2b839bbd459bc20655_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128223391f33b864c2b839bbd459bc20655_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:22,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/1e75fc971808471788836ca82218a1e4, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:22,055 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/1e75fc971808471788836ca82218a1e4 is 175, key is test_row_0/A:col10/1732785801133/Put/seqid=0 2024-11-28T09:23:22,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742501_1677 (size=30955) 2024-11-28T09:23:22,080 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=41, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/1e75fc971808471788836ca82218a1e4 2024-11-28T09:23:22,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1d1e31b6609d4949ab5cbeb25abc8013 is 50, key is test_row_0/B:col10/1732785801133/Put/seqid=0 2024-11-28T09:23:22,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742502_1678 (size=12001) 2024-11-28T09:23:22,142 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1d1e31b6609d4949ab5cbeb25abc8013 2024-11-28T09:23:22,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:22,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/262c44a0a7404bf3b4dae265083b33e1 is 50, key is test_row_0/C:col10/1732785801133/Put/seqid=0 2024-11-28T09:23:22,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742503_1679 (size=12001) 2024-11-28T09:23:22,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:22,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:22,288 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,288 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785862282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785862282, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,292 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785862288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785862288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,293 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785862290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785862389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,392 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785862389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,395 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785862394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785862395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785862395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,595 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785862593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785862600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,603 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785862601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785862601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,605 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785862602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,649 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=41 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/262c44a0a7404bf3b4dae265083b33e1 2024-11-28T09:23:22,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/1e75fc971808471788836ca82218a1e4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4 2024-11-28T09:23:22,689 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4, entries=150, sequenceid=41, filesize=30.2 K 2024-11-28T09:23:22,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1d1e31b6609d4949ab5cbeb25abc8013 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013 2024-11-28T09:23:22,694 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T09:23:22,695 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/262c44a0a7404bf3b4dae265083b33e1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1 2024-11-28T09:23:22,699 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1, entries=150, sequenceid=41, filesize=11.7 K 2024-11-28T09:23:22,700 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 59fa39bbf7af22b65ee5a2abca8580d1 in 732ms, sequenceid=41, compaction requested=false 2024-11-28T09:23:22,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:22,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:22,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-28T09:23:22,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-28T09:23:22,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-28T09:23:22,703 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6560 sec 2024-11-28T09:23:22,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.6610 sec 2024-11-28T09:23:22,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:22,904 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:23:22,904 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:22,905 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:22,937 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128580b58b8c22c4ef3bfda90e6a8da6be8_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:22,980 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785862976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785862977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785862978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785862979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:22,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785862979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:22,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742504_1680 (size=14594) 2024-11-28T09:23:22,989 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:22,994 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128580b58b8c22c4ef3bfda90e6a8da6be8_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128580b58b8c22c4ef3bfda90e6a8da6be8_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:22,995 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b52037ab39274300b57d25edb9a25ac4, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:22,996 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b52037ab39274300b57d25edb9a25ac4 is 175, key is test_row_0/A:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:23,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742505_1681 (size=39549) 2024-11-28T09:23:23,035 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=55, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b52037ab39274300b57d25edb9a25ac4 2024-11-28T09:23:23,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/078a0d2f06ae41f9aba67aaa30764bbf is 50, key is 
test_row_0/B:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:23,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785863082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785863083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,088 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785863084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785863084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,091 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785863088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742506_1682 (size=12001) 2024-11-28T09:23:23,107 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-28T09:23:23,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-28T09:23:23,149 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 163 completed 2024-11-28T09:23:23,151 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:23,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-28T09:23:23,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:23,152 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:23,153 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:23,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:23,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:23,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785863288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785863290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,294 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785863291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,298 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785863291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,299 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785863293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,306 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,306 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:23,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,307 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:23,307 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:23,460 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,460 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:23,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,461 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:23,507 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/078a0d2f06ae41f9aba67aaa30764bbf 2024-11-28T09:23:23,517 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/18ba966a00014453bcff87bb77841ad2 is 50, key is test_row_0/C:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:23,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742507_1683 (size=12001) 2024-11-28T09:23:23,580 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/18ba966a00014453bcff87bb77841ad2 2024-11-28T09:23:23,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b52037ab39274300b57d25edb9a25ac4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4 2024-11-28T09:23:23,593 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4, entries=200, sequenceid=55, filesize=38.6 K 2024-11-28T09:23:23,596 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/078a0d2f06ae41f9aba67aaa30764bbf as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf 2024-11-28T09:23:23,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785863594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf, entries=150, sequenceid=55, filesize=11.7 K 2024-11-28T09:23:23,601 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785863596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/18ba966a00014453bcff87bb77841ad2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2 2024-11-28T09:23:23,602 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785863597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2, entries=150, sequenceid=55, filesize=11.7 K 2024-11-28T09:23:23,608 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 59fa39bbf7af22b65ee5a2abca8580d1 in 704ms, sequenceid=55, compaction requested=true 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:23,608 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:23,608 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:23:23,608 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:23,610 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 101459 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:23,610 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:23,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:23:23,610 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,610 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=99.1 K 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:23,610 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,610 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4] 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:23,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:23,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:23,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:23,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:23,612 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
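The shortCompactions and longCompactions threads above each pick three store files via ExploringCompactionPolicy ("selected 3 files of size 101459 ... with 1 in ratio" for family A, "3 files of size 36003" for family B). The ratio test those lines refer to requires that no file in a candidate set be larger than the compaction ratio times the combined size of the other files. A simplified, self-contained illustration of that check (not the HBase source; the ratio corresponds to hbase.hstore.compaction.ratio, 1.2 by default):

    // Simplified illustration of the "in ratio" test applied to a candidate permutation:
    // every file must be no bigger than `ratio` times the combined size of the other files.
    public final class RatioCheckSketch {
      static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
          total += size;
        }
        for (long size : fileSizes) {
          if (size > (total - size) * ratio) {
            return false; // one file dominates the candidate set; reject this permutation
          }
        }
        return true;
      }

      public static void main(String[] args) {
        // Approximate split of the 101459-byte A-family selection reported above
        // (two ~30.2 K files plus one ~38.6 K file).
        long[] aFiles = {30925, 30925, 39609};
        System.out.println(filesInRatio(aFiles, 1.2)); // true: the largest file is well under 1.2 x the rest
      }
    }

For the A-family selection, the largest file (~38.6 K) is comfortably below 1.2 times the other two (~60.4 K combined), so the single candidate permutation passes and all three files are compacted together, matching the "99.1 K" total in the compaction start message.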
2024-11-28T09:23:23,613 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=35.2 K 2024-11-28T09:23:23,613 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53474051c55544009d772b9412fbe473, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732785801029 2024-11-28T09:23:23,613 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2e4841ab3b02425cbbb50d1feffd4a96, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732785801029 2024-11-28T09:23:23,613 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e75fc971808471788836ca82218a1e4, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732785801133 2024-11-28T09:23:23,616 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d1e31b6609d4949ab5cbeb25abc8013, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732785801133 2024-11-28T09:23:23,616 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting b52037ab39274300b57d25edb9a25ac4, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802279 2024-11-28T09:23:23,616 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 078a0d2f06ae41f9aba67aaa30764bbf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802286 2024-11-28T09:23:23,623 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,624 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:23,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:23,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,624 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,629 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ef188cd6a764dc8bee6591396d9865b_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785802978/Put/seqid=0 2024-11-28T09:23:23,642 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:23,646 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#593 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:23,646 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/be4d0be1cf1944f7b84194034d58979b is 50, key is test_row_0/B:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:23,653 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785863650, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,655 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785863652, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,667 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112877b7a2a682b946f0b30842112db709ed_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:23,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112877b7a2a682b946f0b30842112db709ed_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:23,670 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112877b7a2a682b946f0b30842112db709ed_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:23,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742508_1684 (size=14594) 2024-11-28T09:23:23,690 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:23,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742509_1685 (size=12104) 2024-11-28T09:23:23,717 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411289ef188cd6a764dc8bee6591396d9865b_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ef188cd6a764dc8bee6591396d9865b_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:23,720 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bad8f58fd48c44bb815b0a192e06ecde, store: 
[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:23,721 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bad8f58fd48c44bb815b0a192e06ecde is 175, key is test_row_0/A:col10/1732785802978/Put/seqid=0 2024-11-28T09:23:23,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742510_1686 (size=4469) 2024-11-28T09:23:23,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:23,760 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785863756, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785863761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742511_1687 (size=39549) 2024-11-28T09:23:23,776 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,777 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:23,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:23,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,777 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
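The repeated RegionTooBusyException warnings ("Over memstore limit=512.0 K") mean incoming Mutate calls are rejected while the region's memstore sits above its blocking limit. In a stock configuration that limit is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the 512 KB figure indicates this test run lowers those settings far below the defaults. A hypothetical sketch of settings that would yield the same limit (the test's actual configuration is not visible in this excerpt):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    // Hypothetical sketch only: values chosen to reproduce the 512 KB blocking limit
    // seen in the warnings above, not taken from the test's configuration.
    public final class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // flush a region's memstore at 128 KB
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // block writes at 4 x 128 KB = 512 KB
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
        System.out.println("blocking limit (bytes): " + blockingLimit);
      }
    }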
2024-11-28T09:23:23,777 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,780 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,931 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:23,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:23,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:23,933 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:23,965 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785863962, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:23,970 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:23,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785863968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,086 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,096 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:24,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:24,096 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:24,097 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:24,107 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:24,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785864103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
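On the client side, each rejected Mutate appears in the ipc.CallRunner DEBUG lines above as a RegionTooBusyException attached to a specific callId and deadline. The HBase client normally retries these calls internally before the application ever sees them, but as a standalone illustration of treating the exception as retriable with backoff (retry count, sleep values, and table handling are assumptions, not taken from the test):

    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    // Minimal sketch: retry a put with exponential backoff while the region reports
    // it is over its memstore blocking limit.
    public final class BackoffPutSketch {
      static void putWithBackoff(Connection connection, Put put)
          throws IOException, InterruptedException {
        try (Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long sleepMs = 100;
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              return;
            } catch (RegionTooBusyException e) {
              // Matches the server-side warnings above; depending on client retry settings
              // it may instead surface wrapped in a RetriesExhaustedException.
              Thread.sleep(sleepMs); // give MemStoreFlusher time to drain the memstore
              sleepMs *= 2;
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }
    }

A call mirroring the cells referenced in the flush lines above might look like putWithBackoff(connection, new Put(Bytes.toBytes("test_row_0")).addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value)), with Bytes being org.apache.hadoop.hbase.util.Bytes; the row, family, and qualifier names are taken from the log, the value is arbitrary.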
2024-11-28T09:23:24,110 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:24,110 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/be4d0be1cf1944f7b84194034d58979b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/be4d0be1cf1944f7b84194034d58979b 2024-11-28T09:23:24,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785864105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:24,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785864104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,116 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into be4d0be1cf1944f7b84194034d58979b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:24,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:24,116 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=13, startTime=1732785803608; duration=0sec 2024-11-28T09:23:24,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:24,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:24,116 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:24,117 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:24,117 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:24,117 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
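With the compaction of store B just finished, the ExploringCompactionPolicy immediately selects all three files of store C (about 35.2 K) for a minor compaction of the same region. For readers who want to trigger or observe an equivalent compaction outside this test, a minimal sketch using the public Admin API is shown below; the polling loop, the sleep interval, and the assumption that no other compaction is racing on the table are illustrative choices, not something the test harness does.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.CompactionState;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CompactionStatusExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      TableName table = TableName.valueOf("TestAcidGuarantees");
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        // Ask the region servers to compact a single column family (store C, as in the log).
        // The request is asynchronous; it only queues the compaction.
        admin.compact(table, Bytes.toBytes("C"));
        // Naive wait: poll until no compaction is reported for the table. Note the state can
        // still read NONE briefly before the queued compaction actually starts.
        while (admin.getCompactionState(table) != CompactionState.NONE) {
          Thread.sleep(1000);
        }
      }
    }
  }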
2024-11-28T09:23:24,117 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=35.2 K 2024-11-28T09:23:24,118 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a66bf0d88eb84a099073de78f0a9a4b1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=16, earliestPutTs=1732785801029 2024-11-28T09:23:24,118 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 262c44a0a7404bf3b4dae265083b33e1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=41, earliestPutTs=1732785801133 2024-11-28T09:23:24,118 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 18ba966a00014453bcff87bb77841ad2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802286 2024-11-28T09:23:24,130 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#592 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:24,131 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/3d30b4c8a67447849390082449be6957 is 175, key is test_row_0/A:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:24,136 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#594 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:24,137 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a9be6cbe4a21488fa508b017639b78bd is 50, key is test_row_0/C:col10/1732785802286/Put/seqid=0 2024-11-28T09:23:24,177 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bad8f58fd48c44bb815b0a192e06ecde 2024-11-28T09:23:24,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742512_1688 (size=31058) 2024-11-28T09:23:24,187 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/3d30b4c8a67447849390082449be6957 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957 2024-11-28T09:23:24,191 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into 3d30b4c8a67447849390082449be6957(size=30.3 K), total size for store is 30.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
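The PressureAwareThroughputController lines above report per-compaction write throughput against a cluster-wide limit; the 50.00 MB/second figure is consistent with the controller's default lower bound, which is raised toward the higher bound as flush and compaction pressure grows. A minimal sketch of where those bounds live in the configuration follows; the property names are believed to be the standard ones for the pressure-aware controller but should be verified against the HBase release in use (2.7.0-SNAPSHOT here), and setting them in a client-side Configuration has no effect on a running cluster; they belong in the region servers' hbase-site.xml.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;

  public class CompactionThroughputBounds {
    public static void main(String[] args) {
      // Assumed property names for the pressure-aware compaction throughput controller;
      // shown only to document the knobs behind the "total limit is 50.00 MB/second" entries.
      Configuration conf = HBaseConfiguration.create();
      long lower = conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
      long higher = conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
      System.out.println("compaction throughput lower bound:  " + lower + " bytes/s");
      System.out.println("compaction throughput higher bound: " + higher + " bytes/s");
    }
  }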
2024-11-28T09:23:24,191 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:24,191 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=13, startTime=1732785803608; duration=0sec 2024-11-28T09:23:24,192 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:24,192 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:24,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/15123368771e4bc5b2ec278e3935dbb8 is 50, key is test_row_0/B:col10/1732785802978/Put/seqid=0 2024-11-28T09:23:24,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742513_1689 (size=12104) 2024-11-28T09:23:24,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742514_1690 (size=12001) 2024-11-28T09:23:24,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/15123368771e4bc5b2ec278e3935dbb8 2024-11-28T09:23:24,243 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a5b5bdd67e23430889655c7c7398313e is 50, key is test_row_0/C:col10/1732785802978/Put/seqid=0 2024-11-28T09:23:24,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:24,260 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:24,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:24,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:24,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:24,261 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:24,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:24,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:24,268 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:24,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785864267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:24,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785864272, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742515_1691 (size=12001) 2024-11-28T09:23:24,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a5b5bdd67e23430889655c7c7398313e 2024-11-28T09:23:24,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bad8f58fd48c44bb815b0a192e06ecde as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde 2024-11-28T09:23:24,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde, entries=200, sequenceid=79, filesize=38.6 K 2024-11-28T09:23:24,321 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/15123368771e4bc5b2ec278e3935dbb8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8 2024-11-28T09:23:24,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8, entries=150, sequenceid=79, filesize=11.7 K 2024-11-28T09:23:24,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a5b5bdd67e23430889655c7c7398313e as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e 2024-11-28T09:23:24,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e, entries=150, sequenceid=79, filesize=11.7 K 2024-11-28T09:23:24,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 59fa39bbf7af22b65ee5a2abca8580d1 in 721ms, sequenceid=79, compaction requested=false 2024-11-28T09:23:24,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:24,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,412 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin 
connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:24,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,412 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-28T09:23:24,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:24,413 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:24,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,440 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288ba37554f1814361810356f602c6b8fb_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785803648/Put/seqid=0 2024-11-28T09:23:24,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-28T09:23:24,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742516_1692 (size=9714) 2024-11-28T09:23:24,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,482 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411288ba37554f1814361810356f602c6b8fb_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288ba37554f1814361810356f602c6b8fb_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,483 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/03ace9dbe25f4a3288c47d85b2d41260, store: [table=TestAcidGuarantees family=A 
region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:24,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/03ace9dbe25f4a3288c47d85b2d41260 is 175, key is test_row_0/A:col10/1732785803648/Put/seqid=0 2024-11-28T09:23:24,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,490 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-28T09:23:24,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742517_1693 (size=22361)
2024-11-28T09:23:24,536 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=93, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/03ace9dbe25f4a3288c47d85b2d41260
2024-11-28T09:23:24,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/77b8cab9f0b54c9ab54c8137a4ee99c8 is 50, key is test_row_0/B:col10/1732785803648/Put/seqid=0
2024-11-28T09:23:24,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742518_1694 (size=9657)
2024-11-28T09:23:24,601 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/77b8cab9f0b54c9ab54c8137a4ee99c8
2024-11-28T09:23:24,608 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a9be6cbe4a21488fa508b017639b78bd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a9be6cbe4a21488fa508b017639b78bd
2024-11-28T09:23:24,613 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into a9be6cbe4a21488fa508b017639b78bd(size=11.8 K), total size for store is 23.5 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-28T09:23:24,613 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:24,613 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=13, startTime=1732785803608; duration=0sec 2024-11-28T09:23:24,613 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:24,613 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:24,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,619 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
2024-11-28T09:23:24,624 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7b76ede38c914ddabae4c4bb65e893e0 is 50, key is test_row_0/C:col10/1732785803648/Put/seqid=0
2024-11-28T09:23:24,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742519_1695 (size=9657)
2024-11-28T09:23:24,689 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7b76ede38c914ddabae4c4bb65e893e0
2024-11-28T09:23:24,694 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/03ace9dbe25f4a3288c47d85b2d41260 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260
2024-11-28T09:23:24,698 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260, entries=100, sequenceid=93, filesize=21.8 K
2024-11-28T09:23:24,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/77b8cab9f0b54c9ab54c8137a4ee99c8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8
2024-11-28T09:23:24,703 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8, entries=100, sequenceid=93, filesize=9.4 K
2024-11-28T09:23:24,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7b76ede38c914ddabae4c4bb65e893e0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0
2024-11-28T09:23:24,708 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0, entries=100, sequenceid=93, filesize=9.4 K
2024-11-28T09:23:24,709 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=0 B/0 for 59fa39bbf7af22b65ee5a2abca8580d1 in 297ms, sequenceid=93, compaction requested=true
2024-11-28T09:23:24,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1:
2024-11-28T09:23:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.
2024-11-28T09:23:24,710 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166
2024-11-28T09:23:24,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=166
2024-11-28T09:23:24,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165
2024-11-28T09:23:24,713 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5580 sec
2024-11-28T09:23:24,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 1.5620 sec
2024-11-28T09:23:24,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[2024-11-28T09:23:24,716 through 2024-11-28T09:23:24,786: the same DEBUG entry, storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, is repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (port 33819); no other log events appear in this interval]
2024-11-28T09:23:24,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,855 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:24,855 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:24,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:24,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:24,856 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:24,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:24,892 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285581e5bdfac9487abcba579ab0a2804f_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785804832/Put/seqid=0 
2024-11-28T09:23:24,928 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742520_1696 (size=17034) 2024-11-28T09:23:24,929 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:24,933 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411285581e5bdfac9487abcba579ab0a2804f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285581e5bdfac9487abcba579ab0a2804f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:24,934 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/31e7439af3024478bcb404c72f2ec291, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:24,935 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/31e7439af3024478bcb404c72f2ec291 is 175, key is test_row_0/A:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:24,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742521_1697 (size=48139) 2024-11-28T09:23:24,974 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=105, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/31e7439af3024478bcb404c72f2ec291 2024-11-28T09:23:24,988 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6c78e266ca3147859289726cc22122f4 is 50, key is test_row_0/B:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:25,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742522_1698 (size=12001) 2024-11-28T09:23:25,040 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6c78e266ca3147859289726cc22122f4 2024-11-28T09:23:25,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785865041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,056 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785865049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/060ac75e7bff42c6b6b1087fea8d39a2 is 50, key is test_row_0/C:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:25,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742523_1699 (size=12001) 2024-11-28T09:23:25,122 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785865118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785865120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785865119, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785865154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,170 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785865159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-28T09:23:25,257 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-28T09:23:25,259 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:25,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-28T09:23:25,261 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:25,262 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:25,262 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:25,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-28T09:23:25,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see 
if procedure is done pid=167 2024-11-28T09:23:25,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785865365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785865379, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,413 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-28T09:23:25,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:25,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:25,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:25,414 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] handler.RSProcedureHandler(58): pid=168 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
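[editor's note] The repeated RegionTooBusyException warnings above are the region server rejecting writes while the region's memstore is over its blocking limit (512.0 K under this test's deliberately small settings). Below is a minimal, hedged sketch of how a client writer might back off and retry such a write; the row, column family, qualifier, and retry/backoff values are illustrative, and in practice the HBase client may also surface this condition wrapped in its own retry exceptions rather than directly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryPut {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      int attempt = 0;
      while (true) {
        try {
          table.put(put);               // rejected while the memstore is over its blocking limit
          return;
        } catch (RegionTooBusyException busy) {
          if (++attempt >= 5) {
            throw busy;                 // give up after a handful of attempts
          }
          Thread.sleep(100L * attempt); // simple linear backoff before retrying
        }
      }
    }
  }
}
```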
2024-11-28T09:23:25,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=168 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:25,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=168 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:25,505 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=105 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/060ac75e7bff42c6b6b1087fea8d39a2 2024-11-28T09:23:25,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/31e7439af3024478bcb404c72f2ec291 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291 2024-11-28T09:23:25,516 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291, entries=250, sequenceid=105, filesize=47.0 K 2024-11-28T09:23:25,517 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6c78e266ca3147859289726cc22122f4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4 2024-11-28T09:23:25,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4, entries=150, sequenceid=105, filesize=11.7 K 2024-11-28T09:23:25,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/060ac75e7bff42c6b6b1087fea8d39a2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2 2024-11-28T09:23:25,526 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2, entries=150, sequenceid=105, filesize=11.7 K 2024-11-28T09:23:25,529 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 59fa39bbf7af22b65ee5a2abca8580d1 in 674ms, sequenceid=105, compaction requested=true 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:25,529 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:25,529 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:25,529 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:25,530 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:25,530 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:25,530 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
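[editor's note] The CompactSplit and ExploringCompactionPolicy entries above show the region server queueing system-requested minor compactions right after the flush completes. For comparison, here is a hedged sketch of requesting a compaction explicitly through the Admin API; the table name is taken from the log, the calls are asynchronous, and they only enqueue work on the region server.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestCompaction {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      admin.compact(table);       // queue a minor compaction for every region of the table
      admin.majorCompact(table);  // or request a major compaction that rewrites all store files
    }
  }
}
```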
2024-11-28T09:23:25,531 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/be4d0be1cf1944f7b84194034d58979b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=44.7 K 2024-11-28T09:23:25,531 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141107 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:25,531 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:25,531 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:25,531 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting be4d0be1cf1944f7b84194034d58979b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802286 2024-11-28T09:23:25,531 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=137.8 K 2024-11-28T09:23:25,531 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
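[editor's note] Family A in this test is flushed and compacted through DefaultMobStoreCompactor, i.e. it is a MOB-enabled column family, which is why mobdir paths and MOB writers appear in the log. A hedged sketch of how such a family can be declared when creating a table is shown below; the table name and MOB threshold are illustrative, not the values TestAcidGuarantees actually uses.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Cell values larger than the MOB threshold are written to separate MOB files
      // (under the mobdir paths seen in the log) instead of the regular store files.
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("MobExample"))      // hypothetical table
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L)                        // illustrative threshold in bytes
              .build())
          .build());
    }
  }
}
```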
2024-11-28T09:23:25,531 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291] 2024-11-28T09:23:25,532 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d30b4c8a67447849390082449be6957, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802286 2024-11-28T09:23:25,532 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 15123368771e4bc5b2ec278e3935dbb8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732785802969 2024-11-28T09:23:25,532 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 77b8cab9f0b54c9ab54c8137a4ee99c8, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785803648 2024-11-28T09:23:25,532 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bad8f58fd48c44bb815b0a192e06ecde, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732785802969 2024-11-28T09:23:25,533 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6c78e266ca3147859289726cc22122f4, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:25,533 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 03ace9dbe25f4a3288c47d85b2d41260, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785803648 2024-11-28T09:23:25,534 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 31e7439af3024478bcb404c72f2ec291, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:25,547 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:25,556 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#604 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:25,556 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f6bce462935740fdadfba653721d1cdb is 50, key is test_row_0/B:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:25,561 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128cb6376a532da4a1fa46fe2c11cfe9e4e_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:25,564 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128cb6376a532da4a1fa46fe2c11cfe9e4e_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:25,565 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cb6376a532da4a1fa46fe2c11cfe9e4e_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:25,569 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-28T09:23:25,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-28T09:23:25,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
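[editor's note] The FlushRegionCallable work above is the region-server side of a client-requested table flush; earlier in the log the master turned that request into a FlushTableProcedure with FlushRegionProcedure subprocedures (pid=167/168). A hedged sketch of issuing that kind of flush from the Admin API follows; error handling and waiting for the procedure are omitted.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master drives this through
      // a flush procedure and the region servers write their memstores out as HFiles.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```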
2024-11-28T09:23:25,570 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:25,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:25,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:25,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:25,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:25,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:25,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:25,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742524_1700 (size=12241) 2024-11-28T09:23:25,631 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f6bce462935740fdadfba653721d1cdb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6bce462935740fdadfba653721d1cdb 2024-11-28T09:23:25,637 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into f6bce462935740fdadfba653721d1cdb(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:25,637 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:25,637 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=12, startTime=1732785805529; duration=0sec 2024-11-28T09:23:25,637 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:25,637 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:25,637 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:25,640 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 45763 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:25,640 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:25,640 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:25,640 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a9be6cbe4a21488fa508b017639b78bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=44.7 K 2024-11-28T09:23:25,641 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a9be6cbe4a21488fa508b017639b78bd, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732785802286 2024-11-28T09:23:25,641 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a5b5bdd67e23430889655c7c7398313e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1732785802969 2024-11-28T09:23:25,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283c051ab2c3ac42479aef4ddc9878622d_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785805046/Put/seqid=0 2024-11-28T09:23:25,644 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7b76ede38c914ddabae4c4bb65e893e0, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=93, earliestPutTs=1732785803648 2024-11-28T09:23:25,645 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 060ac75e7bff42c6b6b1087fea8d39a2, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:25,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742525_1701 (size=4469) 2024-11-28T09:23:25,672 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#603 average throughput is 0.20 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:25,672 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/cc306f1fc84745579266269d599d26df is 175, key is test_row_0/A:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:25,683 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#606 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:25,684 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/58501be2adff41b29537eb92a4d4e080 is 50, key is test_row_0/C:col10/1732785804832/Put/seqid=0 2024-11-28T09:23:25,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:25,689 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:25,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742526_1702 (size=12204) 2024-11-28T09:23:25,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:25,703 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283c051ab2c3ac42479aef4ddc9878622d_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283c051ab2c3ac42479aef4ddc9878622d_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:25,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc8db60454b54dd196f1013e722762e9, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:25,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc8db60454b54dd196f1013e722762e9 is 175, key is test_row_0/A:col10/1732785805046/Put/seqid=0 2024-11-28T09:23:25,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785865723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,731 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785865723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742527_1703 (size=31195) 2024-11-28T09:23:25,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742528_1704 (size=12241) 2024-11-28T09:23:25,741 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/cc306f1fc84745579266269d599d26df as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df 2024-11-28T09:23:25,745 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/58501be2adff41b29537eb92a4d4e080 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/58501be2adff41b29537eb92a4d4e080 2024-11-28T09:23:25,747 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into cc306f1fc84745579266269d599d26df(size=30.5 K), total size for store is 30.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
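[editor's note] The 512.0 K figure in the RegionTooBusyException messages is the region's blocking memstore size, which HBase derives from the configured memstore flush size multiplied by a blocking multiplier; this test deliberately uses a tiny flush size to force the blocking path. A hedged sketch of the two standard settings involved is shown below with illustrative values (production defaults are far larger), set programmatically on a Configuration for clarity.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Size at which a region's memstore is flushed to disk (illustrative value, 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    // Writes are rejected with RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 0);
    System.out.println("blocking limit = " + blockingLimit + " bytes");
  }
}
```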
2024-11-28T09:23:25,747 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:25,747 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=12, startTime=1732785805529; duration=0sec 2024-11-28T09:23:25,747 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:25,747 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:25,751 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into 58501be2adff41b29537eb92a4d4e080(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:25,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:25,751 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=12, startTime=1732785805529; duration=0sec 2024-11-28T09:23:25,751 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:25,752 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:25,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742529_1705 (size=31005) 2024-11-28T09:23:25,763 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=130, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc8db60454b54dd196f1013e722762e9 2024-11-28T09:23:25,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/8bf32f3fca90430392061ebbde956d9b is 50, key is test_row_0/B:col10/1732785805046/Put/seqid=0 2024-11-28T09:23:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742530_1706 (size=12051) 2024-11-28T09:23:25,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785865832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,836 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:25,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785865832, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:25,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-28T09:23:26,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785866037, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:26,039 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785866038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:26,256 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/8bf32f3fca90430392061ebbde956d9b 2024-11-28T09:23:26,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2160468e218f42c6bdf93d0ff1e45ee4 is 50, key is test_row_0/C:col10/1732785805046/Put/seqid=0 2024-11-28T09:23:26,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742531_1707 (size=12051) 2024-11-28T09:23:26,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785866341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:26,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785866342, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:26,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-28T09:23:26,721 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=130 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2160468e218f42c6bdf93d0ff1e45ee4 2024-11-28T09:23:26,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc8db60454b54dd196f1013e722762e9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9 2024-11-28T09:23:26,730 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9, entries=150, sequenceid=130, filesize=30.3 K 2024-11-28T09:23:26,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/8bf32f3fca90430392061ebbde956d9b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b 2024-11-28T09:23:26,735 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b, entries=150, sequenceid=130, filesize=11.8 K 2024-11-28T09:23:26,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2160468e218f42c6bdf93d0ff1e45ee4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4 2024-11-28T09:23:26,740 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4, entries=150, sequenceid=130, filesize=11.8 K 2024-11-28T09:23:26,741 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1171ms, sequenceid=130, compaction requested=false 2024-11-28T09:23:26,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:26,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:26,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-28T09:23:26,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-28T09:23:26,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-28T09:23:26,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4800 sec 2024-11-28T09:23:26,747 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.4860 sec 2024-11-28T09:23:26,849 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:26,849 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:26,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:26,874 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807bd94191b3e496191eae70369ed5454_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:26,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742532_1708 (size=12304) 2024-11-28T09:23:26,884 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:26,888 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112807bd94191b3e496191eae70369ed5454_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807bd94191b3e496191eae70369ed5454_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:26,894 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc29ca6f368d4d7f95c862f27b5d2813, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:26,895 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc29ca6f368d4d7f95c862f27b5d2813 is 175, key is test_row_0/A:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:26,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742533_1709 (size=31105) 2024-11-28T09:23:26,930 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785866923, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:26,931 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:26,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785866924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785867032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,034 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785867033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,133 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785867131, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,134 DEBUG [Thread-2926 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:27,135 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785867134, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,136 DEBUG [Thread-2918 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:27,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785867136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,140 DEBUG [Thread-2922 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4162 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at 
region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at 
org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:27,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785867236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785867236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,317 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=145, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc29ca6f368d4d7f95c862f27b5d2813 2024-11-28T09:23:27,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/854879c7ce774d96aa9bb180478628bb is 50, key is test_row_0/B:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:27,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-28T09:23:27,372 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-28T09:23:27,373 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:27,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-28T09:23:27,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-28T09:23:27,375 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:27,376 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:27,376 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742534_1710 (size=12151) 
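The repeated RegionTooBusyException entries above come from HRegion.checkResources(), which rejects writes once the region's memstore exceeds its blocking size, roughly hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the unusually small 512.0 K limit in this run suggests the test deliberately shrinks the flush size to force this path. The AcidGuaranteesTestTool writer in the client-side stack simply lets the HBase client retry. A minimal client-side sketch of the retry knobs involved, assuming a standard HBase 2.x client on the classpath; table, row, family and qualifier names are taken from the log, and all configuration values are illustrative rather than the test's settings:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // More retries and a longer pause give the region time to flush before the client
    // gives up with an operation timeout (the https://s.apache.org/timeout hint above).
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 200);                 // ms between retries
    conf.setLong("hbase.client.operation.timeout", 120_000); // overall budget, ms

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // RegionTooBusyException is unwrapped by RpcRetryingCallerImpl (as in the trace
      // above) and retried internally until the retry/timeout budget is exhausted.
      table.put(put);
    }
  }
}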
2024-11-28T09:23:27,385 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/854879c7ce774d96aa9bb180478628bb 2024-11-28T09:23:27,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/c8ee2fd6158d4f4196703b83a5a758b6 is 50, key is test_row_0/C:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:27,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742535_1711 (size=12151) 2024-11-28T09:23:27,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=145 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/c8ee2fd6158d4f4196703b83a5a758b6 2024-11-28T09:23:27,439 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/fc29ca6f368d4d7f95c862f27b5d2813 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813 2024-11-28T09:23:27,446 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813, entries=150, sequenceid=145, filesize=30.4 K 2024-11-28T09:23:27,447 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/854879c7ce774d96aa9bb180478628bb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb 2024-11-28T09:23:27,452 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb, entries=150, sequenceid=145, filesize=11.9 K 2024-11-28T09:23:27,453 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/c8ee2fd6158d4f4196703b83a5a758b6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6 2024-11-28T09:23:27,459 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6, entries=150, sequenceid=145, filesize=11.9 K 2024-11-28T09:23:27,460 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 59fa39bbf7af22b65ee5a2abca8580d1 in 611ms, sequenceid=145, compaction requested=true 2024-11-28T09:23:27,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:27,460 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:27,461 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:27,461 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:27,462 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93305 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:27,462 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:27,462 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
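The FLUSH operations tracked above (procId 167 completed, procId 169 stored, with FlushTableProcedure fanning out to a FlushRegionProcedure subprocedure) correspond to administrative flush requests from the test client. A minimal sketch of issuing one, assuming an HBase 2.x Admin handle; the table name is taken from the log:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    // Asks the master to flush every region of the table; the master then drives the
    // FlushTableProcedure / FlushRegionProcedure pair visible in the procedure log.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}

Each flush writes one new HFile per column family (the .tmp/A, .tmp/B, .tmp/C files committed above), which is why the stores quickly reach the three-file threshold and compactions are requested right after.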
2024-11-28T09:23:27,462 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=91.1 K 2024-11-28T09:23:27,462 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:27,462 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813] 2024-11-28T09:23:27,462 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:27,462 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:27,462 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
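ExploringCompactionPolicy selected all three eligible HFiles per store here ("3 files ... after considering 1 permutations with 1 in ratio") and HStore started minor compactions for A and B. The eligibility thresholds are configurable, and a compaction can also be requested explicitly through the Admin API. A hedged sketch, with illustrative values rather than this cluster's settings; note the hbase.hstore.* keys are server-side and normally belong in hbase-site.xml:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionTuning {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    // Server-side knobs (shown here only to document the key names): how many store
    // files make a store eligible for minor compaction, and the per-compaction cap.
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      admin.compact(tn);      // queue a minor compaction, like the system-requested ones above
      admin.majorCompact(tn); // or ask for a full rewrite of every store file
    }
  }
}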
2024-11-28T09:23:27,462 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6bce462935740fdadfba653721d1cdb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=35.6 K 2024-11-28T09:23:27,463 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting cc306f1fc84745579266269d599d26df, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:27,463 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f6bce462935740fdadfba653721d1cdb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:27,464 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc8db60454b54dd196f1013e722762e9, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732785805027 2024-11-28T09:23:27,464 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting fc29ca6f368d4d7f95c862f27b5d2813, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:27,464 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bf32f3fca90430392061ebbde956d9b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732785805027 2024-11-28T09:23:27,465 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 854879c7ce774d96aa9bb180478628bb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:27,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-28T09:23:27,481 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:27,496 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#613 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:27,497 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/664acb26643348438b65f43d30cc7b79 is 50, key is test_row_0/B:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:27,505 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128d21d75cf6eb447f1a0d24ab4ebe59ed7_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:27,507 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128d21d75cf6eb447f1a0d24ab4ebe59ed7_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:27,508 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d21d75cf6eb447f1a0d24ab4ebe59ed7_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:27,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,537 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-28T09:23:27,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
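DefaultMobStoreFlusher and DefaultMobStoreCompactor appear in this run because column family A is MOB-enabled; when the compaction finds no cells above the MOB threshold, the freshly created MOB writer is simply aborted, as logged above ("Aborting writer ... because there are no MOB cells"). A sketch of declaring such a family, assuming the HBase 2.x descriptor builders; the table name and threshold are illustrative, only the family name comes from the log:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTable {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Cells in family A larger than the threshold are stored in separate MOB files
      // under the mobdir path, which is why the flush above renames a mobdir/.tmp file.
      admin.createTable(TableDescriptorBuilder.newBuilder(TableName.valueOf("ExampleMobTable"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100 * 1024L) // bytes; illustrative
              .build())
          .build());
    }
  }
}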
2024-11-28T09:23:27,537 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:27,538 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:27,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742536_1712 (size=12493) 2024-11-28T09:23:27,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:27,545 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:27,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742537_1713 (size=4469) 2024-11-28T09:23:27,558 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#612 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:27,559 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/68365344f4ce446fba3eda611768d7a2 is 175, key is test_row_0/A:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:27,568 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785867561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,571 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785867568, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ceda8f08e2c140c1846ed11ba8fb146e_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785806920/Put/seqid=0 2024-11-28T09:23:27,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742538_1714 (size=31447) 2024-11-28T09:23:27,615 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/68365344f4ce446fba3eda611768d7a2 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2 2024-11-28T09:23:27,622 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into 68365344f4ce446fba3eda611768d7a2(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
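The "CompactingMemStore ... FLUSHING TO DISK" and "Swapping pipeline suffix" entries indicate these stores use the in-memory compaction memstore introduced in HBase 2.x, which keeps a pipeline of in-memory segments before the snapshot is written out. A sketch of enabling it per family, assuming ColumnFamilyDescriptorBuilder.setInMemoryCompaction and the MemoryCompactionPolicy enum are available in this version; the policy choice is illustrative:

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionExample {
  public static void main(String[] args) {
    // BASIC keeps flattened in-memory segments in a pipeline (the "pipeline suffix"
    // swapped in the log) before they are flushed to an HFile; EAGER also merges
    // duplicate cells in memory. The cluster-wide default is governed by
    // hbase.hregion.compacting.memstore.type.
    ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
        .build();
    System.out.println(cf);
  }
}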
2024-11-28T09:23:27,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:27,622 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=13, startTime=1732785807460; duration=0sec 2024-11-28T09:23:27,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:27,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:27,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:27,625 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36443 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:27,625 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:27,625 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:27,625 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/58501be2adff41b29537eb92a4d4e080, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=35.6 K 2024-11-28T09:23:27,626 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 58501be2adff41b29537eb92a4d4e080, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=105, earliestPutTs=1732785804832 2024-11-28T09:23:27,626 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2160468e218f42c6bdf93d0ff1e45ee4, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=130, earliestPutTs=1732785805027 2024-11-28T09:23:27,627 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8ee2fd6158d4f4196703b83a5a758b6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:27,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:42869 is added to blk_1073742539_1715 (size=12304) 2024-11-28T09:23:27,638 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#615 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:27,639 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/fb80aa85f98443bd97e892c89ef489d0 is 50, key is test_row_0/C:col10/1732785805715/Put/seqid=0 2024-11-28T09:23:27,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785867670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,675 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785867673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-28T09:23:27,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742540_1716 (size=12493) 2024-11-28T09:23:27,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785867877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:27,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785867878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:27,950 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/664acb26643348438b65f43d30cc7b79 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/664acb26643348438b65f43d30cc7b79 2024-11-28T09:23:27,957 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into 664acb26643348438b65f43d30cc7b79(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
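The flushes and compactions for A, B and C above all carry the same sequence ids because the test's writer threads update one row across all three families in a single Put, and single-row mutations are atomic across families; that row-level atomicity is the property TestAcidGuarantees checks. A sketch of that write pattern, assuming a Table handle for the test table; row, family and qualifier names are taken from the log, the value is a placeholder:

import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiFamilyWrite {
  public static void main(String[] args) throws IOException {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      byte[] value = Bytes.toBytes("placeholder-value");
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // One Put spanning families A, B and C is applied atomically within the row,
      // so concurrent readers never observe a partially updated row.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
      put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
      table.put(put);
    }
  }
}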
2024-11-28T09:23:27,957 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:27,957 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=13, startTime=1732785807461; duration=0sec 2024-11-28T09:23:27,957 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:27,957 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:27,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-28T09:23:28,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:28,037 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ceda8f08e2c140c1846ed11ba8fb146e_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ceda8f08e2c140c1846ed11ba8fb146e_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:28,038 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/92d685df025b4f48ad785f7d47651752, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:28,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/92d685df025b4f48ad785f7d47651752 is 175, key is test_row_0/A:col10/1732785806920/Put/seqid=0 2024-11-28T09:23:28,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742541_1717 (size=31105) 2024-11-28T09:23:28,083 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=169, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/92d685df025b4f48ad785f7d47651752 2024-11-28T09:23:28,093 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/fb80aa85f98443bd97e892c89ef489d0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/fb80aa85f98443bd97e892c89ef489d0 2024-11-28T09:23:28,098 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into fb80aa85f98443bd97e892c89ef489d0(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:28,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:28,098 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=13, startTime=1732785807461; duration=0sec 2024-11-28T09:23:28,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:28,098 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:28,101 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f0e54a1cd5294af886768c799968d784 is 50, key is test_row_0/B:col10/1732785806920/Put/seqid=0 2024-11-28T09:23:28,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742542_1718 (size=12151) 2024-11-28T09:23:28,154 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f0e54a1cd5294af886768c799968d784 2024-11-28T09:23:28,173 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/372184545bd848f2b75343a108a9c845 is 50, key is test_row_0/C:col10/1732785806920/Put/seqid=0 2024-11-28T09:23:28,183 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785868182, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785868183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742543_1719 (size=12151) 2024-11-28T09:23:28,223 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=169 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/372184545bd848f2b75343a108a9c845 2024-11-28T09:23:28,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/92d685df025b4f48ad785f7d47651752 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752 2024-11-28T09:23:28,231 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752, entries=150, sequenceid=169, filesize=30.4 K 2024-11-28T09:23:28,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f0e54a1cd5294af886768c799968d784 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784 2024-11-28T09:23:28,237 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784, entries=150, sequenceid=169, filesize=11.9 K 2024-11-28T09:23:28,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 
{event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/372184545bd848f2b75343a108a9c845 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845 2024-11-28T09:23:28,245 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845, entries=150, sequenceid=169, filesize=11.9 K 2024-11-28T09:23:28,246 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 59fa39bbf7af22b65ee5a2abca8580d1 in 709ms, sequenceid=169, compaction requested=false 2024-11-28T09:23:28,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:28,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:28,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-28T09:23:28,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-28T09:23:28,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-28T09:23:28,250 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 872 msec 2024-11-28T09:23:28,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 877 msec 2024-11-28T09:23:28,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-28T09:23:28,479 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-28T09:23:28,481 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:28,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-28T09:23:28,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:28,482 INFO 
[PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:28,483 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:28,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:28,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:28,635 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,635 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-28T09:23:28,635 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:28,636 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:28,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:28,663 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c7c53c3be9f64eb1a749d8e022518473_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785807545/Put/seqid=0 2024-11-28T09:23:28,688 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:28,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:28,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742544_1720 (size=12304) 2024-11-28T09:23:28,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785868735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,740 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785868737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:28,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785868839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:28,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785868841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:28,986 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T09:23:29,046 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785869044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785869044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:29,100 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:29,104 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c7c53c3be9f64eb1a749d8e022518473_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c7c53c3be9f64eb1a749d8e022518473_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:29,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc2a6c6c0ad942f5a0b72bb6c823af51, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:29,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc2a6c6c0ad942f5a0b72bb6c823af51 is 175, key is test_row_0/A:col10/1732785807545/Put/seqid=0 2024-11-28T09:23:29,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742545_1721 (size=31105) 2024-11-28T09:23:29,349 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785869347, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,353 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785869350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,550 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=184, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc2a6c6c0ad942f5a0b72bb6c823af51 2024-11-28T09:23:29,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f9e7a1f9c7a64e42a3ca85531da73066 is 50, key is test_row_0/B:col10/1732785807545/Put/seqid=0 2024-11-28T09:23:29,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:29,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742546_1722 (size=12151) 2024-11-28T09:23:29,607 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=184 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f9e7a1f9c7a64e42a3ca85531da73066 2024-11-28T09:23:29,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a01c8e96fd384b919f73697e1d4fa281 is 50, key is test_row_0/C:col10/1732785807545/Put/seqid=0 2024-11-28T09:23:29,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742547_1723 (size=12151) 2024-11-28T09:23:29,649 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=184 (bloomFilter=true), 
to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a01c8e96fd384b919f73697e1d4fa281 2024-11-28T09:23:29,655 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc2a6c6c0ad942f5a0b72bb6c823af51 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51 2024-11-28T09:23:29,659 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51, entries=150, sequenceid=184, filesize=30.4 K 2024-11-28T09:23:29,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f9e7a1f9c7a64e42a3ca85531da73066 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066 2024-11-28T09:23:29,665 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066, entries=150, sequenceid=184, filesize=11.9 K 2024-11-28T09:23:29,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a01c8e96fd384b919f73697e1d4fa281 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281 2024-11-28T09:23:29,670 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281, entries=150, sequenceid=184, filesize=11.9 K 2024-11-28T09:23:29,671 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1035ms, sequenceid=184, compaction requested=true 2024-11-28T09:23:29,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 
59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:29,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:29,671 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-28T09:23:29,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-28T09:23:29,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-28T09:23:29,686 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2020 sec 2024-11-28T09:23:29,687 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 1.2050 sec 2024-11-28T09:23:29,864 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:29,865 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:29,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:29,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128eccb719cf25e4e99a62fc2564f215011_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:29,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785869887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785869888, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742548_1724 (size=14794) 2024-11-28T09:23:29,932 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:29,936 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128eccb719cf25e4e99a62fc2564f215011_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128eccb719cf25e4e99a62fc2564f215011_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:29,937 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/40fa901fe07c4e9da006d64742f15498, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:29,938 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/40fa901fe07c4e9da006d64742f15498 is 175, key is test_row_0/A:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:29,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742549_1725 (size=39749) 2024-11-28T09:23:29,959 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=209, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/40fa901fe07c4e9da006d64742f15498 2024-11-28T09:23:29,981 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6f936f5cfe574c399c377a68a5d3e6df is 50, key is 
test_row_0/B:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:29,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785869993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:29,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:29,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785869993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742550_1726 (size=12151) 2024-11-28T09:23:30,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6f936f5cfe574c399c377a68a5d3e6df 2024-11-28T09:23:30,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/858ff778ec174426ab09f0cf756ea8b0 is 50, key is test_row_0/C:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:30,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742551_1727 (size=12151) 2024-11-28T09:23:30,201 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:30,201 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785870199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,202 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:30,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785870199, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:30,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785870504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:30,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785870505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,511 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=209 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/858ff778ec174426ab09f0cf756ea8b0 2024-11-28T09:23:30,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/40fa901fe07c4e9da006d64742f15498 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498 2024-11-28T09:23:30,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498, entries=200, sequenceid=209, filesize=38.8 K 2024-11-28T09:23:30,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6f936f5cfe574c399c377a68a5d3e6df as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df 2024-11-28T09:23:30,530 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T09:23:30,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/858ff778ec174426ab09f0cf756ea8b0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0 2024-11-28T09:23:30,537 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0, entries=150, sequenceid=209, filesize=11.9 K 2024-11-28T09:23:30,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 59fa39bbf7af22b65ee5a2abca8580d1 in 673ms, sequenceid=209, compaction requested=true 2024-11-28T09:23:30,537 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:30,538 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:30,538 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:30,538 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:30,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133406 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:30,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:30,539 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
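The ExploringCompactionPolicy DEBUG lines directly above report that 4 store files (size 133406 for A, 48946 for B) were selected "after considering 3 permutations with 3 in ratio". The sketch below is only a simplified, stand-alone illustration of the size-ratio eligibility test that HBase's ratio/exploring compaction policies are built around (a candidate set passes if no file is much larger than the rest of the set combined); it is not the actual ExploringCompactionPolicy code, the 1.2 ratio is assumed from the usual hbase.hstore.compaction.ratio default, and the byte sizes are approximations back-computed from the totals printed in this log.

```java
import java.util.Arrays;
import java.util.List;

// Simplified illustration of the ratio test behind HBase's size-ratio
// compaction policies. NOT the real ExploringCompactionPolicy; the 1.2
// ratio is an assumed default, and the sizes approximate the four A-store
// files selected in the log above (total 133406 bytes).
public class RatioCheckSketch {

    // A candidate set is "in ratio" if every file is <= ratio * (sum of the others).
    static boolean withinRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Approximate sizes of 68365344..., 92d685df..., dc2a6c6c..., 40fa901f...
        List<Long> storeFileSizes = Arrays.asList(31_437L, 31_105L, 31_115L, 39_749L);
        long total = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        System.out.println("Selected size = " + total
            + " bytes, within ratio = " + withinRatio(storeFileSizes, 1.2));
    }
}
```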
2024-11-28T09:23:30,539 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=130.3 K 2024-11-28T09:23:30,539 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:30,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498] 2024-11-28T09:23:30,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:30,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:30,540 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
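Throughout this section the master logs "Client=jenkins//172.17.0.2 flush TestAcidGuarantees" (FlushTableProcedure pids 169, 171, 173) while the region server repeatedly rejects Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") until the flushes and compactions above catch up. As a minimal, hedged sketch (not taken from the TestAcidGuarantees source), this is roughly what the client side of that interaction looks like with the public HBase 2.x API: a put that tolerates the busy-region rejection, followed by an explicit Admin.flush of the table. Table, row, and family names are copied from the log; the retry count and backoff are assumptions, and in a real client much of this retrying is handled internally by hbase-client, so the exception may surface wrapped rather than directly.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hedged sketch of a writer that backs off while the region reports
// "Over memstore limit", then asks the master to flush the table
// (the FLUSH procedures seen in this log). Retry limits are assumptions.
public class BusyRegionWriterSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tableName);
             Admin admin = conn.getAdmin()) {

            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            // Back off and retry while the server throws RegionTooBusyException;
            // the hbase-client normally does this internally, the explicit loop
            // only illustrates the server-side rejections logged above.
            for (int attempt = 1; ; attempt++) {
                try {
                    table.put(put);
                    break;
                } catch (RegionTooBusyException busy) {
                    if (attempt >= 10) {
                        throw busy;
                    }
                    Thread.sleep(100L * attempt);
                }
            }

            // Equivalent of "Client=jenkins ... flush TestAcidGuarantees": a table
            // flush driven through a FlushTableProcedure on the master.
            admin.flush(tableName);
        }
    }
}
```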
2024-11-28T09:23:30,540 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/664acb26643348438b65f43d30cc7b79, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=47.8 K 2024-11-28T09:23:30,540 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 68365344f4ce446fba3eda611768d7a2, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:30,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 664acb26643348438b65f43d30cc7b79, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 92d685df025b4f48ad785f7d47651752, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732785806899 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f0e54a1cd5294af886768c799968d784, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732785806899 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc2a6c6c0ad942f5a0b72bb6c823af51, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732785807545 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting f9e7a1f9c7a64e42a3ca85531da73066, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732785807545 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 40fa901fe07c4e9da006d64742f15498, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:30,541 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f936f5cfe574c399c377a68a5d3e6df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:30,558 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#624 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:30,558 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/8bb76a3166b847f3afb939584900d0b6 is 50, key is test_row_0/B:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:30,568 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:30,583 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112893c272d61d564bd893088e16182a135f_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:30,587 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112893c272d61d564bd893088e16182a135f_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:30,587 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112893c272d61d564bd893088e16182a135f_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:30,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-28T09:23:30,591 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-28T09:23:30,592 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:30,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-28T09:23:30,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:30,595 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:30,595 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:30,595 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:30,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742552_1728 (size=12629) 2024-11-28T09:23:30,614 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/8bb76a3166b847f3afb939584900d0b6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bb76a3166b847f3afb939584900d0b6 2024-11-28T09:23:30,619 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into 8bb76a3166b847f3afb939584900d0b6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:30,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:30,619 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=12, startTime=1732785810538; duration=0sec 2024-11-28T09:23:30,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:30,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:30,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:30,621 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:30,621 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:30,621 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:30,621 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/fb80aa85f98443bd97e892c89ef489d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=47.8 K 2024-11-28T09:23:30,621 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting fb80aa85f98443bd97e892c89ef489d0, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=145, earliestPutTs=1732785805715 2024-11-28T09:23:30,622 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 372184545bd848f2b75343a108a9c845, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=169, earliestPutTs=1732785806899 2024-11-28T09:23:30,622 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a01c8e96fd384b919f73697e1d4fa281, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=184, earliestPutTs=1732785807545 2024-11-28T09:23:30,622 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 858ff778ec174426ab09f0cf756ea8b0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:30,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742553_1729 (size=4469) 2024-11-28T09:23:30,644 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#625 average throughput is 0.32 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:30,644 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/5aa2c4e274b44ceea874897ea61ed9b7 is 175, key is test_row_0/A:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:30,653 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#626 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:30,653 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/9bb5cf2dfca04680b160eae70f374cd8 is 50, key is test_row_0/C:col10/1732785809863/Put/seqid=0 2024-11-28T09:23:30,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742554_1730 (size=31583) 2024-11-28T09:23:30,694 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/5aa2c4e274b44ceea874897ea61ed9b7 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7 2024-11-28T09:23:30,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:30,698 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into 5aa2c4e274b44ceea874897ea61ed9b7(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:30,698 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:30,698 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=12, startTime=1732785810537; duration=0sec 2024-11-28T09:23:30,698 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:30,698 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:30,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742555_1731 (size=12629) 2024-11-28T09:23:30,746 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:30,746 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:30,747 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:30,747 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:30,779 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128de025fbb729f400ea85b2d6ba5295f49_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785809883/Put/seqid=0 2024-11-28T09:23:30,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742556_1732 (size=12304) 2024-11-28T09:23:30,829 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:30,873 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128de025fbb729f400ea85b2d6ba5295f49_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128de025fbb729f400ea85b2d6ba5295f49_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:30,876 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/2b01caae08c847e99ca6f330462da3b4, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:30,877 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/2b01caae08c847e99ca6f330462da3b4 is 175, key is test_row_0/A:col10/1732785809883/Put/seqid=0 2024-11-28T09:23:30,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:30,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742557_1733 (size=31105) 2024-11-28T09:23:31,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:31,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:31,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785871066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,070 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785871067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,129 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/9bb5cf2dfca04680b160eae70f374cd8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/9bb5cf2dfca04680b160eae70f374cd8 2024-11-28T09:23:31,134 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into 9bb5cf2dfca04680b160eae70f374cd8(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:31,134 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:31,134 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=12, startTime=1732785810538; duration=0sec 2024-11-28T09:23:31,134 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:31,134 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:31,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785871146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,148 DEBUG [Thread-2922 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8171 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at 
org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:31,162 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785871161, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,163 DEBUG [Thread-2918 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8185 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:31,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785871171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785871171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785871176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,181 DEBUG [Thread-2926 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8205 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:31,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:31,303 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=222, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/2b01caae08c847e99ca6f330462da3b4 2024-11-28T09:23:31,315 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/db06100243ce4785afe2e70b55b7aeea is 50, key is test_row_0/B:col10/1732785809883/Put/seqid=0 2024-11-28T09:23:31,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742558_1734 (size=12151) 2024-11-28T09:23:31,353 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/db06100243ce4785afe2e70b55b7aeea 2024-11-28T09:23:31,374 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/95606fc0b5a74e479f76905ac6bc9d77 is 50, key is test_row_0/C:col10/1732785809883/Put/seqid=0 2024-11-28T09:23:31,376 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,376 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785871374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785871375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742559_1735 (size=12151) 2024-11-28T09:23:31,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785871679, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,681 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785871680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:31,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:31,818 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=222 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/95606fc0b5a74e479f76905ac6bc9d77 2024-11-28T09:23:31,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/2b01caae08c847e99ca6f330462da3b4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4 2024-11-28T09:23:31,829 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4, entries=150, sequenceid=222, filesize=30.4 K 2024-11-28T09:23:31,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/db06100243ce4785afe2e70b55b7aeea as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea 2024-11-28T09:23:31,835 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea, entries=150, sequenceid=222, filesize=11.9 K 2024-11-28T09:23:31,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/95606fc0b5a74e479f76905ac6bc9d77 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77 2024-11-28T09:23:31,847 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77, entries=150, sequenceid=222, filesize=11.9 K 2024-11-28T09:23:31,848 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1101ms, sequenceid=222, compaction requested=false 2024-11-28T09:23:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:31,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-28T09:23:31,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-28T09:23:31,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-28T09:23:31,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2550 sec 2024-11-28T09:23:31,853 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.2590 sec 2024-11-28T09:23:32,187 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:32,187 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:32,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:32,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785872203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,206 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128025330d4c6c949469b8b4b0a724bf8ae_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:32,207 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785872203, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742560_1736 (size=12304) 2024-11-28T09:23:32,310 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785872307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,311 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785872308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,516 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785872512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785872524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,673 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:32,677 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128025330d4c6c949469b8b4b0a724bf8ae_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128025330d4c6c949469b8b4b0a724bf8ae_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:32,679 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/26891d71e15d4757bb94d1b73eb9ebc4, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:32,680 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/26891d71e15d4757bb94d1b73eb9ebc4 is 175, key is test_row_0/A:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:32,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-28T09:23:32,699 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, 
procId: 173 completed 2024-11-28T09:23:32,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:32,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-28T09:23:32,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T09:23:32,703 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:32,704 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:32,704 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:32,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742561_1737 (size=31105) 2024-11-28T09:23:32,718 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=249, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/26891d71e15d4757bb94d1b73eb9ebc4 2024-11-28T09:23:32,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6e74bf8e645b4c3f989cf609c3b3dad5 is 50, key is test_row_0/B:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:32,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742562_1738 (size=12151) 2024-11-28T09:23:32,795 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6e74bf8e645b4c3f989cf609c3b3dad5 2024-11-28T09:23:32,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T09:23:32,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/dcbd627ec4224641844fe33768bc2051 is 50, key is test_row_0/C:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:32,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785872817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:32,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785872828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742563_1739 (size=12151) 2024-11-28T09:23:32,835 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/dcbd627ec4224641844fe33768bc2051 2024-11-28T09:23:32,843 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/26891d71e15d4757bb94d1b73eb9ebc4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4 2024-11-28T09:23:32,850 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4, entries=150, sequenceid=249, filesize=30.4 K 2024-11-28T09:23:32,855 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6e74bf8e645b4c3f989cf609c3b3dad5 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5 2024-11-28T09:23:32,855 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:32,856 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T09:23:32,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
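
The burst of RegionTooBusyException entries above is the server-side HRegion.checkResources call rejecting Mutate requests while the region's memstore sits above its blocking limit (reported here as 512.0 K, a deliberately tiny threshold for this test); the client keeps resubmitting the same put through RpcRetryingCallerImpl, which is what the "tries=7, retries=16, started=8205 ms ago" entry records, until a flush drains the memstore or the retry budget runs out. Roughly, the write path being exercised looks like the sketch below; the class name, retry values, and cell value are illustrative assumptions rather than the settings used in this run, and the blocking threshold itself is controlled on the server by the memstore flush size and block-multiplier settings.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative client retry settings (assumed values, not the ones used by this run);
        // they govern how RpcRetryingCallerImpl produces the "tries=N, retries=M" entries above.
        conf.setInt("hbase.client.retries.number", 15);
        conf.setLong("hbase.client.pause", 100L); // base back-off between retries, in milliseconds

        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Same shape of write as the test tool: row "test_row_2", family "A", qualifier "col10".
          Put put = new Put(Bytes.toBytes("test_row_2"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
          // A RegionTooBusyException thrown by the region server is retried inside table.put();
          // it only reaches the caller once the retry budget is exhausted.
          table.put(put);
        }
      }
    }

Nothing on the client side can prevent the exception; the retry settings only decide how long the caller is willing to wait for the region to become writable again.
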
2024-11-28T09:23:32,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing
2024-11-28T09:23:32,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.
2024-11-28T09:23:32,856 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176
java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:32,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176
java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T09:23:32,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=176
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
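
The failed pid=176 above is the region-server half of the FlushTableProcedure pid=175 that the master stored a moment earlier: the region refuses the request with "NOT flushing ... as already flushing" because the MemStoreFlusher-triggered flush is still running, the callable reports the IOException back to the master, and the master re-dispatches the procedure shortly afterwards (the second "Executing remote procedure ... pid=176" entry below). These table flushes originate from the test driver calling the admin API, as the earlier "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed" entry shows. A minimal sketch of such a request through the public Admin interface, assuming a reachable cluster whose hbase-site.xml is on the classpath, would be:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Ask the master to flush every region of the table; in this build that is
          // executed as a FlushTableProcedure with one FlushRegionProcedure per region,
          // matching the pid=175/pid=176 entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Whether the resulting FlushRegionProcedure succeeds on its first dispatch simply depends on whether a flush of the same region is already in flight, as it was here.
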
2024-11-28T09:23:32,865 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5, entries=150, sequenceid=249, filesize=11.9 K 2024-11-28T09:23:32,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/dcbd627ec4224641844fe33768bc2051 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051 2024-11-28T09:23:32,869 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051, entries=150, sequenceid=249, filesize=11.9 K 2024-11-28T09:23:32,870 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 59fa39bbf7af22b65ee5a2abca8580d1 in 683ms, sequenceid=249, compaction requested=true 2024-11-28T09:23:32,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:32,871 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:32,871 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:32,871 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:32,872 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:32,872 DEBUG 
[RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:32,872 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:32,872 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bb76a3166b847f3afb939584900d0b6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=36.1 K 2024-11-28T09:23:32,873 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:32,873 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:32,873 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:32,873 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=91.6 K 2024-11-28T09:23:32,873 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:32,873 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4] 2024-11-28T09:23:32,874 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bb76a3166b847f3afb939584900d0b6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:32,874 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5aa2c4e274b44ceea874897ea61ed9b7, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:32,874 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting db06100243ce4785afe2e70b55b7aeea, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732785809877 2024-11-28T09:23:32,875 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b01caae08c847e99ca6f330462da3b4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732785809877 2024-11-28T09:23:32,875 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6e74bf8e645b4c3f989cf609c3b3dad5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:32,875 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 26891d71e15d4757bb94d1b73eb9ebc4, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:32,900 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#633 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:32,902 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/a70c4912253043c594885211df95f68b is 50, key is test_row_0/B:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:32,909 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:32,934 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128e22a131c21d943f3bdcfd2ecda654561_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:32,936 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128e22a131c21d943f3bdcfd2ecda654561_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:32,936 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128e22a131c21d943f3bdcfd2ecda654561_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:32,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742564_1740 (size=12731) 2024-11-28T09:23:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742565_1741 (size=4469) 2024-11-28T09:23:33,000 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#634 average throughput is 0.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:33,001 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/0ae7aa7f13be4f3a8745a16976272ecb is 175, key is test_row_0/A:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:33,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T09:23:33,012 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-28T09:23:33,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,014 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:33,014 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742566_1742 (size=31685) 2024-11-28T09:23:33,051 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/0ae7aa7f13be4f3a8745a16976272ecb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb 2024-11-28T09:23:33,059 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into 0ae7aa7f13be4f3a8745a16976272ecb(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:33,059 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:33,059 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=13, startTime=1732785812870; duration=0sec 2024-11-28T09:23:33,059 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:33,059 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:33,059 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:33,060 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:33,060 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:33,060 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
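
The entries above show ExploringCompactionPolicy selecting all three HFiles of a store and the region server rewriting them into a single file under .tmp before committing it back ("Completed compaction of 3 (all) file(s) ... into ..."). As a point of reference only, the same mechanism can be driven from a client through the public Admin API; the Java fragment below is a minimal sketch, not part of the captured test, assuming a reachable cluster with hbase-site.xml on the classpath. The table name is taken from the log; the class name and the one-second polling interval are illustrative assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactionPollSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Like the minor compactions in the log, this call only queues work on the
                // region server and returns immediately.
                admin.majorCompact(table);
                CompactionState state;
                do {
                    Thread.sleep(1000);                          // illustrative polling interval
                    state = admin.getCompactionState(table);
                    System.out.println("compaction state: " + state);
                } while (state != CompactionState.NONE);         // NONE once all stores are rewritten
            }
        }
    }

Polling is needed because the request is asynchronous; completion corresponds to the "Completed compaction ... total size for store is ..." lines in the log above.
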
2024-11-28T09:23:33,061 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/9bb5cf2dfca04680b160eae70f374cd8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=36.1 K 2024-11-28T09:23:33,061 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bb5cf2dfca04680b160eae70f374cd8, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=209, earliestPutTs=1732785808724 2024-11-28T09:23:33,061 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95606fc0b5a74e479f76905ac6bc9d77, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=222, earliestPutTs=1732785809877 2024-11-28T09:23:33,062 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dcbd627ec4224641844fe33768bc2051, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:33,063 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287daee126c81344919ab9473d4b6704ac_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785812202/Put/seqid=0 2024-11-28T09:23:33,096 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#636 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:33,097 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/8cfdb1727e784476a7239ca2e3f38836 is 50, key is test_row_0/C:col10/1732785811064/Put/seqid=0 2024-11-28T09:23:33,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742567_1743 (size=12404) 2024-11-28T09:23:33,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742568_1744 (size=12731) 2024-11-28T09:23:33,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T09:23:33,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:33,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:33,363 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/a70c4912253043c594885211df95f68b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a70c4912253043c594885211df95f68b 2024-11-28T09:23:33,369 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into a70c4912253043c594885211df95f68b(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:33,369 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:33,369 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=13, startTime=1732785812871; duration=0sec 2024-11-28T09:23:33,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:33,370 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:33,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785873385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,390 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785873386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785873491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785873491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:33,522 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287daee126c81344919ab9473d4b6704ac_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287daee126c81344919ab9473d4b6704ac_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:33,524 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc36ee46f2b14c048ded9069db6d4ee3, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:33,525 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc36ee46f2b14c048ded9069db6d4ee3 is 175, key is test_row_0/A:col10/1732785812202/Put/seqid=0 2024-11-28T09:23:33,550 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/8cfdb1727e784476a7239ca2e3f38836 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/8cfdb1727e784476a7239ca2e3f38836 2024-11-28T09:23:33,555 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into 8cfdb1727e784476a7239ca2e3f38836(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:33,555 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:33,555 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=13, startTime=1732785812871; duration=0sec 2024-11-28T09:23:33,555 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:33,555 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:33,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742569_1745 (size=31205) 2024-11-28T09:23:33,558 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=259, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc36ee46f2b14c048ded9069db6d4ee3 2024-11-28T09:23:33,565 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/e399338301ca4f048809e395c5900425 is 50, key is test_row_0/B:col10/1732785812202/Put/seqid=0 2024-11-28T09:23:33,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742570_1746 (size=12251) 2024-11-28T09:23:33,584 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/e399338301ca4f048809e395c5900425 2024-11-28T09:23:33,594 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/29966b01de034a9997e3b8791b3f5a94 is 50, key is test_row_0/C:col10/1732785812202/Put/seqid=0 2024-11-28T09:23:33,617 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742571_1747 (size=12251) 2024-11-28T09:23:33,620 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=259 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/29966b01de034a9997e3b8791b3f5a94 2024-11-28T09:23:33,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/dc36ee46f2b14c048ded9069db6d4ee3 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3 2024-11-28T09:23:33,633 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3, entries=150, sequenceid=259, filesize=30.5 K 2024-11-28T09:23:33,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/e399338301ca4f048809e395c5900425 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425 2024-11-28T09:23:33,643 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425, entries=150, sequenceid=259, filesize=12.0 K 2024-11-28T09:23:33,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/29966b01de034a9997e3b8791b3f5a94 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94 2024-11-28T09:23:33,650 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94, entries=150, sequenceid=259, filesize=12.0 K 2024-11-28T09:23:33,652 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 
KB/48090, heapSize ~123.75 KB/126720, currentSize=161.02 KB/164880 for 59fa39bbf7af22b65ee5a2abca8580d1 in 638ms, sequenceid=259, compaction requested=false 2024-11-28T09:23:33,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:33,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-28T09:23:33,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-28T09:23:33,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-28T09:23:33,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 949 msec 2024-11-28T09:23:33,658 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 956 msec 2024-11-28T09:23:33,698 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-28T09:23:33,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:33,699 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:33,712 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785873710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785873710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411281b55da8ca8804aa181022405d1857f3b_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:33,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742572_1748 (size=14994) 2024-11-28T09:23:33,751 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:33,759 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411281b55da8ca8804aa181022405d1857f3b_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281b55da8ca8804aa181022405d1857f3b_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:33,762 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/01cf38d0e4d64ecf87f9cad82b32398b, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:33,765 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/01cf38d0e4d64ecf87f9cad82b32398b is 175, key is test_row_0/A:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:33,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742573_1749 (size=39949) 2024-11-28T09:23:33,804 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=291, memsize=58.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/01cf38d0e4d64ecf87f9cad82b32398b 2024-11-28T09:23:33,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-28T09:23:33,806 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-28T09:23:33,808 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees 2024-11-28T09:23:33,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:33,809 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:33,810 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=177, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:33,810 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:33,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785873816, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:33,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785873821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,826 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/bcd07dfcb8f5401796abf31446a2ce36 is 50, key is test_row_0/B:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:33,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742574_1750 (size=12301) 2024-11-28T09:23:33,896 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/bcd07dfcb8f5401796abf31446a2ce36 2024-11-28T09:23:33,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:33,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7cfe7782dbe741c59b8648e551d6ecb0 is 50, key is test_row_0/C:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:33,963 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:33,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:33,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:33,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
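
The RegionTooBusyException warnings above originate in HRegion.checkResources: the region's memstore has exceeded its blocking limit (512.0 K in this test configuration), so Mutate RPCs are rejected until a flush frees space, while the "flush TestAcidGuarantees" requests from the jenkins client appear on the master as FlushTableProcedure/FlushRegionProcedure pairs. The sketch below is a hedged illustration of what a writer hitting this condition might look like, assuming hbase-site.xml on the classpath; the table, row, family, and qualifier names are taken from the log keys, whereas the class name, cell value, and backoff policy are assumptions, and in practice the HBase client already retries this exception internally.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriterSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();   // assumes hbase-site.xml on the classpath
            TableName name = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(name);
                 Admin admin = conn.getAdmin()) {
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                            // illustrative starting backoff
                while (true) {
                    try {
                        table.put(put);                          // accepted once the memstore has room again
                        break;
                    } catch (RegionTooBusyException | RetriesExhaustedException e) {
                        // Same condition as the WARN entries above: checkResources found the memstore
                        // over its blocking limit. Depending on client retry settings the busy state
                        // surfaces directly or wrapped after the internal retries are exhausted.
                        Thread.sleep(backoffMs);
                        backoffMs = Math.min(backoffMs * 2, 5_000);
                    }
                }
                // The client-initiated "flush TestAcidGuarantees" requests in the log correspond to
                // this Admin call, which the master turns into FlushTableProcedure/FlushRegionProcedure.
                admin.flush(name);
            }
        }
    }

Exponential backoff is used here only to make the retry loop visible; it mirrors what the stock client retry policy does for retriable exceptions such as RegionTooBusyException.
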
2024-11-28T09:23:33,963 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:33,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:33,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:33,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742575_1751 (size=12301) 2024-11-28T09:23:33,974 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=291 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7cfe7782dbe741c59b8648e551d6ecb0 2024-11-28T09:23:33,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/01cf38d0e4d64ecf87f9cad82b32398b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b 2024-11-28T09:23:33,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b, entries=200, sequenceid=291, filesize=39.0 K 2024-11-28T09:23:33,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/bcd07dfcb8f5401796abf31446a2ce36 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36 2024-11-28T09:23:33,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36, entries=150, sequenceid=291, filesize=12.0 K 2024-11-28T09:23:33,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/7cfe7782dbe741c59b8648e551d6ecb0 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0 2024-11-28T09:23:33,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0, entries=150, sequenceid=291, filesize=12.0 K 2024-11-28T09:23:33,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 59fa39bbf7af22b65ee5a2abca8580d1 in 298ms, sequenceid=291, compaction requested=true 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:33,996 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:33,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-28T09:23:33,996 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:33,997 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102839 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:33,997 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 
files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:33,997 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:33,997 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:33,997 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,997 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,998 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=100.4 K 2024-11-28T09:23:33,998 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a70c4912253043c594885211df95f68b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=36.4 K 2024-11-28T09:23:33,998 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:33,998 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b] 2024-11-28T09:23:33,998 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a70c4912253043c594885211df95f68b, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:33,998 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0ae7aa7f13be4f3a8745a16976272ecb, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:33,999 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting dc36ee46f2b14c048ded9069db6d4ee3, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732785812193 2024-11-28T09:23:33,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting e399338301ca4f048809e395c5900425, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732785812193 2024-11-28T09:23:33,999 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting bcd07dfcb8f5401796abf31446a2ce36, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:33,999 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01cf38d0e4d64ecf87f9cad82b32398b, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:34,007 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:34,008 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#642 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:34,009 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/19283596fdab48c3b50004d7f0c1ad66 is 50, key is test_row_0/B:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:34,025 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128857203c82bbf461399acfd1b64439c8f_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:34,028 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128857203c82bbf461399acfd1b64439c8f_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:34,029 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128857203c82bbf461399acfd1b64439c8f_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:34,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:34,037 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:34,037 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742576_1752 (size=12983) 2024-11-28T09:23:34,062 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/19283596fdab48c3b50004d7f0c1ad66 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/19283596fdab48c3b50004d7f0c1ad66 2024-11-28T09:23:34,067 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] 
regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into 19283596fdab48c3b50004d7f0c1ad66(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:34,067 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:34,067 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=13, startTime=1732785813996; duration=0sec 2024-11-28T09:23:34,067 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:34,067 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:34,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:34,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37283 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:34,068 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:34,068 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:34,069 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/8cfdb1727e784476a7239ca2e3f38836, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=36.4 K 2024-11-28T09:23:34,069 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 8cfdb1727e784476a7239ca2e3f38836, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732785811064 2024-11-28T09:23:34,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 29966b01de034a9997e3b8791b3f5a94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=259, earliestPutTs=1732785812193 2024-11-28T09:23:34,070 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 7cfe7782dbe741c59b8648e551d6ecb0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:34,082 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286f329a63615b4f1881c9df9938f23e68_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785814026/Put/seqid=0 2024-11-28T09:23:34,096 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#645 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:34,097 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a6d73812c3ac4ae586f28fd8f50b806a is 50, key is test_row_0/C:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:34,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742577_1753 (size=4469) 2024-11-28T09:23:34,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:34,115 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:34,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,118 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#643 average throughput is 0.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:34,119 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bb2af9a6bfad448aa32d5074a34f029e is 175, key is test_row_0/A:col10/1732785813377/Put/seqid=0 2024-11-28T09:23:34,127 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785874123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,130 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785874127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742578_1754 (size=12454) 2024-11-28T09:23:34,165 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:34,170 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286f329a63615b4f1881c9df9938f23e68_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f329a63615b4f1881c9df9938f23e68_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:34,171 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/573f5c378130441cafea97ea84953bf4, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:34,172 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/573f5c378130441cafea97ea84953bf4 is 175, key is test_row_0/A:col10/1732785814026/Put/seqid=0 2024-11-28T09:23:34,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742579_1755 (size=12983) 2024-11-28T09:23:34,187 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/a6d73812c3ac4ae586f28fd8f50b806a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a6d73812c3ac4ae586f28fd8f50b806a 2024-11-28T09:23:34,194 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into a6d73812c3ac4ae586f28fd8f50b806a(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:34,194 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:34,194 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=13, startTime=1732785813996; duration=0sec 2024-11-28T09:23:34,194 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:34,194 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:34,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742580_1756 (size=31937) 2024-11-28T09:23:34,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742581_1757 (size=31255) 2024-11-28T09:23:34,230 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=303, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/573f5c378130441cafea97ea84953bf4 2024-11-28T09:23:34,232 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785874228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785874232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,249 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/5484b2e3bfca4576a422c2e7e2bb9149 is 50, key is test_row_0/B:col10/1732785814026/Put/seqid=0 2024-11-28T09:23:34,268 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,269 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742582_1758 (size=12301) 2024-11-28T09:23:34,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:34,421 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,422 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:34,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785874433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785874437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,575 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,575 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,576 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:34,576 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,606 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/bb2af9a6bfad448aa32d5074a34f029e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e 2024-11-28T09:23:34,612 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into bb2af9a6bfad448aa32d5074a34f029e(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:34,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:34,612 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=13, startTime=1732785813996; duration=0sec 2024-11-28T09:23:34,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:34,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:34,686 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/5484b2e3bfca4576a422c2e7e2bb9149 2024-11-28T09:23:34,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/33b65e3b8081438d9cbb9f4a77b06d5c is 50, key is test_row_0/C:col10/1732785814026/Put/seqid=0 2024-11-28T09:23:34,729 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,729 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:34,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,730 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:34,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785874738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:34,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785874742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742583_1759 (size=12301) 2024-11-28T09:23:34,883 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:34,883 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:34,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,883 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:34,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:34,884 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:34,884 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:34,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:35,035 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=178 2024-11-28T09:23:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:35,036 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] handler.RSProcedureHandler(58): pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:35,036 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=178 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:35,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=178 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:35,145 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=303 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/33b65e3b8081438d9cbb9f4a77b06d5c 2024-11-28T09:23:35,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/573f5c378130441cafea97ea84953bf4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4 2024-11-28T09:23:35,154 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4, entries=150, sequenceid=303, filesize=30.5 K 2024-11-28T09:23:35,156 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/5484b2e3bfca4576a422c2e7e2bb9149 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149 2024-11-28T09:23:35,160 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149, entries=150, sequenceid=303, filesize=12.0 K 2024-11-28T09:23:35,161 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/33b65e3b8081438d9cbb9f4a77b06d5c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c 2024-11-28T09:23:35,174 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c, entries=150, sequenceid=303, filesize=12.0 K 2024-11-28T09:23:35,176 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1139ms, sequenceid=303, compaction requested=false 2024-11-28T09:23:35,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:35,190 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,190 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=178 2024-11-28T09:23:35,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:35,191 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:35,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:35,226 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280d69b2ca11cd4eddb4dbd63522336baf_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785814125/Put/seqid=0 2024-11-28T09:23:35,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:35,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:35,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742584_1760 (size=12454) 2024-11-28T09:23:35,271 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 214 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785875267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785875267, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,375 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785875373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,376 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785875374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,578 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785875577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,588 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785875586, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:35,684 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280d69b2ca11cd4eddb4dbd63522336baf_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d69b2ca11cd4eddb4dbd63522336baf_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:35,685 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/7c811cc370dc4492aa383067529be2aa, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:35,686 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/7c811cc370dc4492aa383067529be2aa is 175, key is test_row_0/A:col10/1732785814125/Put/seqid=0 2024-11-28T09:23:35,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742585_1761 (size=31255) 2024-11-28T09:23:35,883 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785875881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,893 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:35,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785875891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:35,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:36,134 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=330, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/7c811cc370dc4492aa383067529be2aa 2024-11-28T09:23:36,151 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1c78efa04d7f4ec1a45ee1e1c67e5185 is 50, key is test_row_0/B:col10/1732785814125/Put/seqid=0 2024-11-28T09:23:36,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742586_1762 (size=12301) 2024-11-28T09:23:36,391 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:36,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785876389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:36,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:36,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785876398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:36,603 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1c78efa04d7f4ec1a45ee1e1c67e5185 2024-11-28T09:23:36,616 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/1393c59b11cc4b658a80c2375943529d is 50, key is test_row_0/C:col10/1732785814125/Put/seqid=0 2024-11-28T09:23:36,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742587_1763 (size=12301) 2024-11-28T09:23:36,655 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=330 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/1393c59b11cc4b658a80c2375943529d 2024-11-28T09:23:36,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/7c811cc370dc4492aa383067529be2aa as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa 2024-11-28T09:23:36,666 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa, entries=150, sequenceid=330, filesize=30.5 K 2024-11-28T09:23:36,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/1c78efa04d7f4ec1a45ee1e1c67e5185 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185 2024-11-28T09:23:36,672 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T09:23:36,672 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/1393c59b11cc4b658a80c2375943529d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d 2024-11-28T09:23:36,679 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d, entries=150, sequenceid=330, filesize=12.0 K 2024-11-28T09:23:36,680 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1489ms, sequenceid=330, compaction requested=true 2024-11-28T09:23:36,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:36,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:36,680 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-28T09:23:36,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=178 2024-11-28T09:23:36,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-28T09:23:36,683 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.8710 sec 2024-11-28T09:23:36,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=177, table=TestAcidGuarantees in 2.8760 sec 2024-11-28T09:23:37,396 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-28T09:23:37,396 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:37,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:37,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:37,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:37,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:37,397 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:37,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:37,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128feb8e49ac20340b981d3779eec8eb3eb_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:37,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742588_1764 (size=14994) 2024-11-28T09:23:37,495 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785877491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 236 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785877494, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,597 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785877597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,603 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 238 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785877599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,803 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 251 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785877800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,808 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:37,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785877805, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:37,862 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:37,866 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128feb8e49ac20340b981d3779eec8eb3eb_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128feb8e49ac20340b981d3779eec8eb3eb_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:37,867 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/80e1db10708941f38003b0d9844c6365, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:37,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/80e1db10708941f38003b0d9844c6365 is 175, key is test_row_0/A:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:37,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-28T09:23:37,915 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-28T09:23:37,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742589_1765 (size=39949) 2024-11-28T09:23:37,926 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=343, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/80e1db10708941f38003b0d9844c6365 2024-11-28T09:23:37,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:37,930 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees 2024-11-28T09:23:37,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T09:23:37,931 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:37,931 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=179, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:37,932 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:37,954 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/3472e918b0344c27ab589decfc2cd3a6 is 50, key is test_row_0/B:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:37,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742590_1766 (size=12301) 2024-11-28T09:23:37,992 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/3472e918b0344c27ab589decfc2cd3a6 2024-11-28T09:23:38,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ec0ae66f96040a9831bf97dce8efb86 is 50, key is test_row_0/C:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:38,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T09:23:38,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742591_1767 (size=12301) 2024-11-28T09:23:38,054 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ec0ae66f96040a9831bf97dce8efb86 2024-11-28T09:23:38,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/80e1db10708941f38003b0d9844c6365 as 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365 2024-11-28T09:23:38,066 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365, entries=200, sequenceid=343, filesize=39.0 K 2024-11-28T09:23:38,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/3472e918b0344c27ab589decfc2cd3a6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6 2024-11-28T09:23:38,071 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6, entries=150, sequenceid=343, filesize=12.0 K 2024-11-28T09:23:38,072 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ec0ae66f96040a9831bf97dce8efb86 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86 2024-11-28T09:23:38,082 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86, entries=150, sequenceid=343, filesize=12.0 K 2024-11-28T09:23:38,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 59fa39bbf7af22b65ee5a2abca8580d1 in 686ms, sequenceid=343, compaction requested=true 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:38,083 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:38,083 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:38,083 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:38,083 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,084 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=180 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,084 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:38,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:38,085 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:38,085 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
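
[Editor's illustration] The repeated RegionTooBusyException warnings around this point come from HRegion.checkResources rejecting writes while the region's memstore is over its 512.0 K blocking limit; in normal operation the HBase client absorbs these by retrying the RPC internally until the flush above drains the memstore. Purely as a hedged sketch of that behaviour from the caller's side (the row key, family, qualifier, value, attempt count and back-off numbers below are assumptions for illustration, not values taken from this test), an explicit retry loop could look like:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Illustrative put; the column layout mirrors the A/col10 cells seen in the log.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;                 // initial pause between attempts (assumption)
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);                   // rejected while the memstore is over its blocking limit
          break;
        } catch (RegionTooBusyException busy) {
          // The server refuses the write instead of queueing it; back off and try again
          // so the in-flight flush/compaction can bring the memstore back under the limit.
          Thread.sleep(backoffMs);
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}
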
2024-11-28T09:23:38,086 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/19283596fdab48c3b50004d7f0c1ad66, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=48.7 K 2024-11-28T09:23:38,086 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134396 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:38,086 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:38,086 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,086 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=131.2 K 2024-11-28T09:23:38,086 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,087 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365] 2024-11-28T09:23:38,087 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 19283596fdab48c3b50004d7f0c1ad66, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:38,087 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting bb2af9a6bfad448aa32d5074a34f029e, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:38,088 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 5484b2e3bfca4576a422c2e7e2bb9149, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732785814026 2024-11-28T09:23:38,089 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 573f5c378130441cafea97ea84953bf4, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732785814026 2024-11-28T09:23:38,089 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1c78efa04d7f4ec1a45ee1e1c67e5185, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785814120 2024-11-28T09:23:38,093 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7c811cc370dc4492aa383067529be2aa, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785814120 2024-11-28T09:23:38,094 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3472e918b0344c27ab589decfc2cd3a6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732785815260 2024-11-28T09:23:38,095 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80e1db10708941f38003b0d9844c6365, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732785815248 2024-11-28T09:23:38,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:38,114 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112840220e2af2af4777ba0ebcafecd6e10f_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785817490/Put/seqid=0 2024-11-28T09:23:38,114 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:38,134 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#655 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:38,135 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/67f82bf2a8b348aaba32836a5a610c66 is 50, key is test_row_0/B:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:38,140 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,150 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 258 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785878145, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 247 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785878146, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,160 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128aec25fb1e215451eab03cfd74cea1037_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,164 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128aec25fb1e215451eab03cfd74cea1037_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,164 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128aec25fb1e215451eab03cfd74cea1037_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742592_1768 (size=12454) 2024-11-28T09:23:38,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:38,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T09:23:38,235 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112840220e2af2af4777ba0ebcafecd6e10f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112840220e2af2af4777ba0ebcafecd6e10f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:38,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c4cf759f9aca49178e72ccdee70c608c, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c4cf759f9aca49178e72ccdee70c608c is 175, key is test_row_0/A:col10/1732785817490/Put/seqid=0 2024-11-28T09:23:38,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742593_1769 (size=13119) 2024-11-28T09:23:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 249 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785878251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 260 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785878251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742594_1770 (size=4469) 2024-11-28T09:23:38,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742595_1771 (size=31255) 2024-11-28T09:23:38,300 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=366, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c4cf759f9aca49178e72ccdee70c608c 2024-11-28T09:23:38,326 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0855790d6c304cf8b92256c04af28b59 is 50, key is test_row_0/B:col10/1732785817490/Put/seqid=0 2024-11-28T09:23:38,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742596_1772 (size=12301) 2024-11-28T09:23:38,369 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0855790d6c304cf8b92256c04af28b59 2024-11-28T09:23:38,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/d091f3d3598d474d81c752540b359629 is 50, key is test_row_0/C:col10/1732785817490/Put/seqid=0 2024-11-28T09:23:38,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742597_1773 (size=12301) 2024-11-28T09:23:38,424 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data 
size=44.73 KB at sequenceid=366 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/d091f3d3598d474d81c752540b359629 2024-11-28T09:23:38,429 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c4cf759f9aca49178e72ccdee70c608c as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c 2024-11-28T09:23:38,432 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c, entries=150, sequenceid=366, filesize=30.5 K 2024-11-28T09:23:38,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0855790d6c304cf8b92256c04af28b59 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59 2024-11-28T09:23:38,437 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59, entries=150, sequenceid=366, filesize=12.0 K 2024-11-28T09:23:38,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/d091f3d3598d474d81c752540b359629 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629 2024-11-28T09:23:38,443 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629, entries=150, sequenceid=366, filesize=12.0 K 2024-11-28T09:23:38,444 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 59fa39bbf7af22b65ee5a2abca8580d1 in 360ms, sequenceid=366, compaction requested=true 2024-11-28T09:23:38,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] 
regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:38,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=180}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=180 2024-11-28T09:23:38,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=180 2024-11-28T09:23:38,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-28T09:23:38,448 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 514 msec 2024-11-28T09:23:38,453 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=179, table=TestAcidGuarantees in 521 msec 2024-11-28T09:23:38,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:38,468 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:38,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:38,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286f93b7296b194ef09eb1c76d77929a91_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:38,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742598_1774 (size=17534) 2024-11-28T09:23:38,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-11-28T09:23:38,534 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 179 completed 2024-11-28T09:23:38,534 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:38,536 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:38,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees 2024-11-28T09:23:38,539 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:38,539 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286f93b7296b194ef09eb1c76d77929a91_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f93b7296b194ef09eb1c76d77929a91_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:38,539 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=181, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:38,539 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=182, ppid=181, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:38,540 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/d3e6e3264914495194da1efd8f5b680a, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:38,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:38,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/d3e6e3264914495194da1efd8f5b680a is 175, key is test_row_0/A:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:38,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 261 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785878558, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,570 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 272 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785878565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742599_1775 (size=48639) 2024-11-28T09:23:38,594 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/d3e6e3264914495194da1efd8f5b680a 2024-11-28T09:23:38,616 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/d2f3c92e85da4f0f899882685d69fc5b is 50, key is test_row_0/B:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:38,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:38,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742600_1776 (size=12301) 2024-11-28T09:23:38,659 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/67f82bf2a8b348aaba32836a5a610c66 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/67f82bf2a8b348aaba32836a5a610c66 2024-11-28T09:23:38,667 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into 67f82bf2a8b348aaba32836a5a610c66(size=12.8 K), total size for store is 24.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
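Note on the HMobStore / DefaultMobStoreFlusher entries above: family A of this table is flushed through the MOB path, so the flushed cells land in a mob file under /mobdir and the regular store file keeps references to it. A minimal sketch of how such a table could be declared through the Admin API is below; the table and family names mirror the log, while the 10 KB MOB threshold is an arbitrary illustrative value, not the test's setting.

// Hedged sketch: declare a table whose family "A" is MOB-enabled, which is what
// routes its flushes through DefaultMobStoreFlusher/HMobStore as in the log above.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
      // Family A: MOB-enabled, so cells above the threshold are written to mob files.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(10 * 1024L)   // illustrative threshold only
          .build());
      // Families B and C: ordinary store files, matching the B/C compaction entries in the log.
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"));
      table.setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));
      admin.createTable(table.build());
    }
  }
}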
2024-11-28T09:23:38,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:38,667 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=12, startTime=1732785818083; duration=0sec 2024-11-28T09:23:38,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:38,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:38,667 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-28T09:23:38,669 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62187 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-28T09:23:38,669 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:38,669 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,669 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a6d73812c3ac4ae586f28fd8f50b806a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=60.7 K 2024-11-28T09:23:38,670 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a6d73812c3ac4ae586f28fd8f50b806a, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=291, earliestPutTs=1732785813377 2024-11-28T09:23:38,671 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 263 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785878666, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,671 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 33b65e3b8081438d9cbb9f4a77b06d5c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=303, earliestPutTs=1732785814026 2024-11-28T09:23:38,672 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 1393c59b11cc4b658a80c2375943529d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=330, earliestPutTs=1732785814120 2024-11-28T09:23:38,672 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ec0ae66f96040a9831bf97dce8efb86, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732785815260 2024-11-28T09:23:38,673 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d091f3d3598d474d81c752540b359629, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732785817485 2024-11-28T09:23:38,675 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#656 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:38,675 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c50ed4d76d294b03a5882093e6a7f321 is 175, key is test_row_0/A:col10/1732785815260/Put/seqid=0 2024-11-28T09:23:38,676 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 274 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785878672, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,690 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,690 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:38,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:38,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
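Note on the repeated RegionTooBusyException entries ("Over memstore limit=512.0 K"): HRegion.checkResources rejects writes while the region's memstore is above its blocking limit, which is the memstore flush size multiplied by the block multiplier; a 512 K limit implies this test cluster runs with a deliberately small flush size. A minimal sketch of that arithmetic using the standard configuration keys follows; only the printed wording is mine, and the server code itself is not reproduced here.

// Hedged sketch: compute the memstore blocking limit suggested by the log's
// "Over memstore limit=512.0 K": flush size x block multiplier.
// The key names are standard HBase keys; the defaults shown are the stock ones,
// and the test cluster evidently overrides them to arrive at 512 K.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;
    System.out.printf("flushSize=%d bytes, multiplier=%d, blocking limit=%d bytes%n",
        flushSize, multiplier, blockingLimit);
    // e.g. a 128 KB flush size with a multiplier of 4 would give the 512 KB limit above;
    // writes are rejected with RegionTooBusyException until a flush drains the memstore.
  }
}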
2024-11-28T09:23:38,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,698 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#661 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:38,698 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/deecdec0efad400582fa3133fa1e323d is 50, key is test_row_0/C:col10/1732785817490/Put/seqid=0 2024-11-28T09:23:38,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742601_1777 (size=32073) 2024-11-28T09:23:38,706 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c50ed4d76d294b03a5882093e6a7f321 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321 2024-11-28T09:23:38,712 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into c50ed4d76d294b03a5882093e6a7f321(size=31.3 K), total size for store is 61.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:38,712 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:38,712 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=12, startTime=1732785818083; duration=0sec 2024-11-28T09:23:38,712 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:38,712 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:38,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742602_1778 (size=13153) 2024-11-28T09:23:38,740 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/deecdec0efad400582fa3133fa1e323d as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/deecdec0efad400582fa3133fa1e323d 2024-11-28T09:23:38,748 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into deecdec0efad400582fa3133fa1e323d(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:38,748 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:38,748 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=11, startTime=1732785818083; duration=0sec 2024-11-28T09:23:38,748 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:38,748 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:38,842 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,843 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:38,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
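Note on the compaction entries above ("Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking", then "Completed compaction of 5 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C"): store-file counts drive these decisions; a store becomes eligible once it has at least the configured minimum number of files, and writes start being delayed at the blocking count, which is the 16 reported here. A small sketch of reading those thresholds is below; the key names are the stock HBase ones and only the printed wording is mine.

// Hedged sketch: inspect the store-file thresholds behind the compaction
// selection logged above (5 eligible files chosen, 16 blocking).
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholds {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);       // files needed before a minor compaction
    int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);      // most files compacted in one pass
    int blocking = conf.getInt("hbase.hstore.blockingStoreFiles", 16);  // the "16 blocking" in the log
    System.out.printf("minor compaction at >=%d files, at most %d per pass, writes throttled at %d files%n",
        minFiles, maxFiles, blocking);
  }
}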
2024-11-28T09:23:38,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:38,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,843 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:38,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:38,874 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 265 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785878872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:38,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785878879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,995 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:38,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:38,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:38,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:38,997 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
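Note on pid=181/pid=182: pid=181 is the client-requested table flush ("Client=jenkins//172.17.0.2 flush TestAcidGuarantees"), and its FlushRegionProcedure subtask pid=182 keeps failing with "Unable to complete flush ... as already flushing" because the MemStoreFlusher is still running its own flush; the master re-dispatches the callable until it succeeds while the client polls "Checking to see if procedure is done pid=181". A minimal sketch of the client side of such a flush is below; Admin.flush is the real API, and the waiting behaviour described in the comment is what the polling entries above suggest.

// Hedged sketch of the client call behind "Client=jenkins ... flush TestAcidGuarantees".
// Admin.flush submits a flush-table procedure on the master (pid=181 above) and the
// client polls until that procedure is done; transient "already flushing" failures on
// the region server side are retried by the master, as the log shows for pid=182.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}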
2024-11-28T09:23:38,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:38,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,047 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/d2f3c92e85da4f0f899882685d69fc5b 2024-11-28T09:23:39,073 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ff3a8ebca3845d2bed8a72f2eda29b8 is 50, key is test_row_0/C:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:39,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742603_1779 (size=12301) 2024-11-28T09:23:39,148 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,148 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:39,148 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:39,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,149 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T09:23:39,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:39,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 267 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785879180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785879184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,301 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,301 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:39,301 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:39,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,302 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
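Note on the writer side of the rejected Mutate calls: each one eventually surfaces to the caller as a RegionTooBusyException, possibly wrapped in a retries-exhausted error, since the client library already retries it internally. Backing off and retrying is the usual response, because the condition clears once the in-flight flush completes. A hypothetical sketch of such a loop follows; it is not the test's own writer code, and the row, family, qualifier and value are placeholders taken loosely from the log.

// Hypothetical writer-side sketch (not the test's code): back off and retry a put
// while the region reports it is over its memstore limit. The HBase client retries
// RegionTooBusyException internally; this outer loop just illustrates the back-off idea.
import java.io.IOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);
          break;                      // write accepted
        } catch (IOException e) {     // RegionTooBusyException or a retry-exhausted wrapper
          if (attempt >= 10) throw e;
          Thread.sleep(backoffMs);    // let the in-flight flush drain the memstore
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}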
2024-11-28T09:23:39,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,454 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,455 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,455 ERROR [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] handler.RSProcedureHandler(58): pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-0 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=182 java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T09:23:39,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4114): Remote procedure failed, pid=182 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
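The repeated "Unable to complete flush ... as already flushing" failures above are the master re-dispatching FlushRegionProcedure pid=182 while the region's own MemStoreFlusher is still writing; the region server rejects each attempt until its in-flight flush finishes, and the master simply retries. From a client, the whole exchange sits behind a single blocking flush call on the Admin API. A minimal Java sketch, assuming an hbase-site.xml on the classpath that points at this test cluster (the class name and error handling are illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
        public static void main(String[] args) throws Exception {
            // Assumes the cluster configuration (hbase-site.xml) is on the classpath.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Submits a flush of every region of the table and waits for the
                // master-side procedure to finish, as the later
                // "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed"
                // entry suggests the test client does.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }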
2024-11-28T09:23:39,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ff3a8ebca3845d2bed8a72f2eda29b8 2024-11-28T09:23:39,516 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/d3e6e3264914495194da1efd8f5b680a as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a 2024-11-28T09:23:39,521 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a, entries=250, sequenceid=381, filesize=47.5 K 2024-11-28T09:23:39,523 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/d2f3c92e85da4f0f899882685d69fc5b as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b 2024-11-28T09:23:39,527 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b, entries=150, sequenceid=381, filesize=12.0 K 2024-11-28T09:23:39,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/2ff3a8ebca3845d2bed8a72f2eda29b8 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8 2024-11-28T09:23:39,534 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8, entries=150, sequenceid=381, filesize=12.0 K 2024-11-28T09:23:39,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1067ms, sequenceid=381, compaction requested=true 2024-11-28T09:23:39,536 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:39,536 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-28T09:23:39,537 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:39,537 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111967 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:39,537 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:39,537 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,538 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=109.3 K 2024-11-28T09:23:39,538 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:39,538 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a] 2024-11-28T09:23:39,538 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c50ed4d76d294b03a5882093e6a7f321, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732785815260 2024-11-28T09:23:39,539 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-28T09:23:39,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4cf759f9aca49178e72ccdee70c608c, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732785817485 2024-11-28T09:23:39,539 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3e6e3264914495194da1efd8f5b680a, keycount=250, bloomtype=ROW, size=47.5 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732785818127 2024-11-28T09:23:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:39,539 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:39,540 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:39,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-28T09:23:39,540 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:39,540 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:39,541 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/67f82bf2a8b348aaba32836a5a610c66, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=36.8 K 2024-11-28T09:23:39,541 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 67f82bf2a8b348aaba32836a5a610c66, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=343, earliestPutTs=1732785815260 2024-11-28T09:23:39,542 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0855790d6c304cf8b92256c04af28b59, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732785817485 2024-11-28T09:23:39,543 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting d2f3c92e85da4f0f899882685d69fc5b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732785818133 2024-11-28T09:23:39,551 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:39,552 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#664 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:39,553 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6fe20ee1edaf4d2bb9029e5a34d896ed is 50, key is test_row_0/B:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:39,566 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024112855ccd38cc9c24783ac01dc00eec51c90_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:39,568 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024112855ccd38cc9c24783ac01dc00eec51c90_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:39,568 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112855ccd38cc9c24783ac01dc00eec51c90_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:39,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742604_1780 (size=13221) 2024-11-28T09:23:39,607 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,608 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/6fe20ee1edaf4d2bb9029e5a34d896ed as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6fe20ee1edaf4d2bb9029e5a34d896ed 2024-11-28T09:23:39,608 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=182 2024-11-28T09:23:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:39,608 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-28T09:23:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:39,608 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:39,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:39,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:39,614 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 59fa39bbf7af22b65ee5a2abca8580d1 into 6fe20ee1edaf4d2bb9029e5a34d896ed(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:39,614 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:39,614 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=13, startTime=1732785819537; duration=0sec 2024-11-28T09:23:39,614 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:39,614 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:39,614 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-28T09:23:39,615 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-28T09:23:39,615 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-28T09:23:39,615 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. because compaction request was cancelled 2024-11-28T09:23:39,615 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C 2024-11-28T09:23:39,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742605_1781 (size=4469) 2024-11-28T09:23:39,618 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#663 average throughput is 0.37 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:39,618 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/43009b1219b04e58bc21b57ef339be50 is 175, key is test_row_0/A:col10/1732785818467/Put/seqid=0 2024-11-28T09:23:39,647 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128393bbb86ad1d4de899326e9fe942df16_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785818557/Put/seqid=0 2024-11-28T09:23:39,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:39,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742606_1782 (size=32175) 2024-11-28T09:23:39,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742607_1783 (size=12454) 2024-11-28T09:23:39,664 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/43009b1219b04e58bc21b57ef339be50 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50 2024-11-28T09:23:39,669 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into 43009b1219b04e58bc21b57ef339be50(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
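The compaction entries above show ExploringCompactionPolicy selecting three eligible HFiles per store and SortedCompactionPolicy declining a follow-up because it needs at least three files ("Need 3 to initiate"); that minimum comes from the store's minimum-files-to-compact setting, and the compactions themselves were queued automatically by CompactSplit after the flush. A hedged Java sketch of raising that threshold per table and requesting a compaction explicitly through the Admin API; the property key is the standard HBase one, while the value 5 and the class name are illustrative only:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CompactionTuningExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName table = TableName.valueOf("TestAcidGuarantees");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Raise the minimum number of store files a minor compaction needs
                // (the default of 3 is what produces "Need 3 to initiate" above).
                // Illustrative value; normally tuned in hbase-site.xml instead.
                TableDescriptor current = admin.getDescriptor(table);
                TableDescriptor tuned = TableDescriptorBuilder.newBuilder(current)
                    .setValue("hbase.hstore.compaction.min", "5")
                    .build();
                admin.modifyTable(tuned);

                // Queue a compaction of every region of the table, comparable to the
                // system-requested compactions logged by CompactSplit above.
                admin.compact(table);
            }
        }
    }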
2024-11-28T09:23:39,669 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:39,669 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=13, startTime=1732785819536; duration=0sec 2024-11-28T09:23:39,669 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:39,669 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:39,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:39,688 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. as already flushing 2024-11-28T09:23:39,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 285 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785879708, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,711 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 276 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785879709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,814 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,814 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 278 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785879812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:39,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 287 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785879812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 289 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785880015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,016 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 280 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785880015, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,061 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:40,064 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128393bbb86ad1d4de899326e9fe942df16_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128393bbb86ad1d4de899326e9fe942df16_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:40,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/85c57e9e71424766bb396a873f198aae, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:40,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/85c57e9e71424766bb396a873f198aae is 175, key is test_row_0/A:col10/1732785818557/Put/seqid=0 2024-11-28T09:23:40,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742608_1784 (size=31255) 2024-11-28T09:23:40,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 291 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785880317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,319 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 282 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785880318, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,470 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=405, memsize=40.3 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/85c57e9e71424766bb396a873f198aae 2024-11-28T09:23:40,477 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/a8a3a30828cb42d585cc68c93c145bf1 is 50, key is test_row_0/B:col10/1732785818557/Put/seqid=0 2024-11-28T09:23:40,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742609_1785 (size=12301) 2024-11-28T09:23:40,481 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/a8a3a30828cb42d585cc68c93c145bf1 2024-11-28T09:23:40,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/efe078bd778548818a2b2d3edd352acb is 50, key is test_row_0/C:col10/1732785818557/Put/seqid=0 2024-11-28T09:23:40,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742610_1786 (size=12301) 2024-11-28T09:23:40,509 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=405 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/efe078bd778548818a2b2d3edd352acb 2024-11-28T09:23:40,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, 
pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/85c57e9e71424766bb396a873f198aae as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae 2024-11-28T09:23:40,517 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae, entries=150, sequenceid=405, filesize=30.5 K 2024-11-28T09:23:40,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/a8a3a30828cb42d585cc68c93c145bf1 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1 2024-11-28T09:23:40,521 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1, entries=150, sequenceid=405, filesize=12.0 K 2024-11-28T09:23:40,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/efe078bd778548818a2b2d3edd352acb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb 2024-11-28T09:23:40,525 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb, entries=150, sequenceid=405, filesize=12.0 K 2024-11-28T09:23:40,526 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=80.51 KB/82440 for 59fa39bbf7af22b65ee5a2abca8580d1 in 918ms, sequenceid=405, compaction requested=true 2024-11-28T09:23:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
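The RegionTooBusyException warnings above ("Over memstore limit=512.0 K") are the region server pushing back on writers while the region's memstore sits above its blocking threshold, roughly hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; the unusually small 512 K limit evidently reflects the tiny flush size this test configures, and the writes go through once the flush logged above completes. The stock client retries such push-back internally (it may surface wrapped in a RetriesExhaustedException rather than directly), so the sketch below only makes the back-off explicit for a bare Table.put; the retry budget and delays are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.RetriesExhaustedException;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                // Row, family and qualifier mirror the keys visible in the log
                // (test_row_0 / A:col10); the value is arbitrary.
                Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

                int maxAttempts = 5;   // hypothetical retry budget
                long backoffMs = 100;  // hypothetical initial back-off
                for (int attempt = 1; ; attempt++) {
                    try {
                        table.put(put);
                        break; // accepted once the memstore has drained
                    } catch (RegionTooBusyException | RetriesExhaustedException e) {
                        // A production version would inspect the cause chain; here we
                        // just back off and retry while the flush completes.
                        if (attempt >= maxAttempts) {
                            throw e;
                        }
                        Thread.sleep(backoffMs);
                        backoffMs *= 2;
                    }
                }
            }
        }
    }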
2024-11-28T09:23:40,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-1 {event_type=RS_FLUSH_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-28T09:23:40,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=182 2024-11-28T09:23:40,528 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=182, resume processing ppid=181 2024-11-28T09:23:40,529 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9880 sec 2024-11-28T09:23:40,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=181, table=TestAcidGuarantees in 1.9930 sec 2024-11-28T09:23:40,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-28T09:23:40,651 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-28T09:23:40,652 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.2 flush TestAcidGuarantees 2024-11-28T09:23:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees 2024-11-28T09:23:40,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:40,654 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-28T09:23:40,654 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=183, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-28T09:23:40,654 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-28T09:23:40,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:40,806 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,806 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33819 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=184 2024-11-28T09:23:40,806 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
2024-11-28T09:23:40,806 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:40,807 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:40,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112872eb69f9a4ca462e8c27f8fe93b8a7be_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785819702/Put/seqid=0 2024-11-28T09:23:40,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742611_1787 (size=12454) 2024-11-28T09:23:40,822 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:40,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:40,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
as already flushing 2024-11-28T09:23:40,825 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024112872eb69f9a4ca462e8c27f8fe93b8a7be_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112872eb69f9a4ca462e8c27f8fe93b8a7be_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:40,826 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/243f437bc98544a8a41bfce847a9fd77, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:40,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/243f437bc98544a8a41bfce847a9fd77 is 175, key is test_row_0/A:col10/1732785819702/Put/seqid=0 2024-11-28T09:23:40,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742612_1788 (size=31255) 2024-11-28T09:23:40,849 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 302 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785880847, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,851 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 293 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785880849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,951 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 304 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785880950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:40,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 295 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785880952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:40,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:41,103 DEBUG [Thread-2933 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x53305d9b to 127.0.0.1:53251 2024-11-28T09:23:41,103 DEBUG [Thread-2931 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5d836f78 to 127.0.0.1:53251 2024-11-28T09:23:41,104 DEBUG [Thread-2933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:41,104 DEBUG [Thread-2931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:41,105 DEBUG [Thread-2935 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6bb6288a to 127.0.0.1:53251 2024-11-28T09:23:41,105 DEBUG [Thread-2935 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:41,106 DEBUG [Thread-2937 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06556601 to 127.0.0.1:53251 2024-11-28T09:23:41,106 DEBUG [Thread-2937 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:41,106 DEBUG [Thread-2929 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:53251 2024-11-28T09:23:41,106 DEBUG [Thread-2929 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:41,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 306 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785881152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 297 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785881155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38726 deadline: 1732785881187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,187 DEBUG [Thread-2918 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18209 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:41,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,195 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38722 deadline: 1732785881194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,195 DEBUG [Thread-2922 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18218 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:41,230 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=422, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/243f437bc98544a8a41bfce847a9fd77 2024-11-28T09:23:41,235 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2bd459e3dccf446ea8906fbd80ccf228 is 50, key is test_row_0/B:col10/1732785819702/Put/seqid=0 2024-11-28T09:23:41,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742613_1789 (size=12301) 2024-11-28T09:23:41,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:41,266 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38668 deadline: 1732785881266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,267 DEBUG [Thread-2926 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18291 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., hostname=363d8d38a970,33819,1732785660637, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T09:23:41,455 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 308 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785881455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 299 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785881456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,639 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2bd459e3dccf446ea8906fbd80ccf228 2024-11-28T09:23:41,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/3d6212518b744148b117354c8daca2f9 is 50, key is test_row_0/C:col10/1732785819702/Put/seqid=0 2024-11-28T09:23:41,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742614_1790 (size=12301) 2024-11-28T09:23:41,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:41,959 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 301 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38714 deadline: 1732785881959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:41,959 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-28T09:23:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] ipc.CallRunner(138): callId: 310 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.2:38682 deadline: 1732785881959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 2024-11-28T09:23:42,047 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/3d6212518b744148b117354c8daca2f9 2024-11-28T09:23:42,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/243f437bc98544a8a41bfce847a9fd77 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77 2024-11-28T09:23:42,052 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77, entries=150, sequenceid=422, filesize=30.5 K 2024-11-28T09:23:42,053 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/2bd459e3dccf446ea8906fbd80ccf228 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228 2024-11-28T09:23:42,055 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228, entries=150, sequenceid=422, filesize=12.0 K 2024-11-28T09:23:42,056 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/3d6212518b744148b117354c8daca2f9 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9 2024-11-28T09:23:42,058 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9, entries=150, sequenceid=422, filesize=12.0 K 2024-11-28T09:23:42,059 INFO [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1253ms, sequenceid=422, compaction requested=true 2024-11-28T09:23:42,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:42,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:42,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/363d8d38a970:0-2 {event_type=RS_FLUSH_REGIONS, pid=184}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=184 2024-11-28T09:23:42,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster(4106): Remote procedure done, pid=184 2024-11-28T09:23:42,060 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=183 2024-11-28T09:23:42,061 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4060 sec 2024-11-28T09:23:42,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=183, table=TestAcidGuarantees in 1.4090 sec 2024-11-28T09:23:42,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=183 2024-11-28T09:23:42,757 INFO [Thread-2928 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 183 completed 2024-11-28T09:23:42,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=33819 {}] regionserver.HRegion(8581): Flush requested on 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:42,964 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:42,965 DEBUG [Thread-2924 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17327621 to 127.0.0.1:53251 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:42,965 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:42,965 DEBUG [Thread-2924 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:42,969 DEBUG [Thread-2920 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0d5efb7a to 127.0.0.1:53251 2024-11-28T09:23:42,969 DEBUG [Thread-2920 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:42,970 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128aac094fb5a304c45b306a09813bfb067_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_0/A:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:42,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742615_1791 (size=12454) 2024-11-28T09:23:43,373 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:43,376 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128aac094fb5a304c45b306a09813bfb067_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128aac094fb5a304c45b306a09813bfb067_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:43,377 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/e2146ffaa3404c0e8067ee11cace9d95, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:43,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/e2146ffaa3404c0e8067ee11cace9d95 is 175, key is test_row_0/A:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:43,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742616_1792 (size=31255) 2024-11-28T09:23:43,781 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=444, memsize=42.5 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/e2146ffaa3404c0e8067ee11cace9d95 2024-11-28T09:23:43,787 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 is 50, key is test_row_0/B:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:43,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742617_1793 (size=12301) 2024-11-28T09:23:44,190 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 2024-11-28T09:23:44,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/6ce8b808dd81427a9b2f880f990947cd is 50, key is test_row_0/C:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:44,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742618_1794 (size=12301) 2024-11-28T09:23:44,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=444 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/6ce8b808dd81427a9b2f880f990947cd 2024-11-28T09:23:44,601 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/e2146ffaa3404c0e8067ee11cace9d95 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95 2024-11-28T09:23:44,604 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95, entries=150, sequenceid=444, filesize=30.5 K 2024-11-28T09:23:44,604 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 2024-11-28T09:23:44,607 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66, entries=150, 
sequenceid=444, filesize=12.0 K 2024-11-28T09:23:44,607 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/6ce8b808dd81427a9b2f880f990947cd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd 2024-11-28T09:23:44,609 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd, entries=150, sequenceid=444, filesize=12.0 K 2024-11-28T09:23:44,610 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=6.71 KB/6870 for 59fa39bbf7af22b65ee5a2abca8580d1 in 1646ms, sequenceid=444, compaction requested=true 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:A, priority=-2147483648, current under compaction store size is 1 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:B, priority=-2147483648, current under compaction store size is 2 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 59fa39bbf7af22b65ee5a2abca8580d1:C, priority=-2147483648, current under compaction store size is 3 2024-11-28T09:23:44,610 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:44,610 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:44,610 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125940 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/A is initiating minor compaction (all files) 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 
50124 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-28T09:23:44,611 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/A in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/B is initiating minor compaction (all files) 2024-11-28T09:23:44,611 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=123.0 K 2024-11-28T09:23:44,611 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/B in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:44,611 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
files: [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95] 2024-11-28T09:23:44,611 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6fe20ee1edaf4d2bb9029e5a34d896ed, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=48.9 K 2024-11-28T09:23:44,611 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 43009b1219b04e58bc21b57ef339be50, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732785818133 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6fe20ee1edaf4d2bb9029e5a34d896ed, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732785818133 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85c57e9e71424766bb396a873f198aae, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732785818548 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting a8a3a30828cb42d585cc68c93c145bf1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732785818548 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting 243f437bc98544a8a41bfce847a9fd77, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732785819702 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2bd459e3dccf446ea8906fbd80ccf228, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732785819702 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] compactions.Compactor(224): Compacting e2146ffaa3404c0e8067ee11cace9d95, keycount=150, 
bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732785820845 2024-11-28T09:23:44,612 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 0bfe8861dc194f9e99ea8ad6a9d5ed66, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732785820845 2024-11-28T09:23:44,618 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:44,619 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#B#compaction#675 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:44,619 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241128d4e4db52d2f94913aa3cd5a84934904d_59fa39bbf7af22b65ee5a2abca8580d1 store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:44,619 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f6983d010ba44245a4d92b6858f5a4f4 is 50, key is test_row_0/B:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:44,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241128d4e4db52d2f94913aa3cd5a84934904d_59fa39bbf7af22b65ee5a2abca8580d1, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:44,622 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128d4e4db52d2f94913aa3cd5a84934904d_59fa39bbf7af22b65ee5a2abca8580d1 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:44,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742619_1795 (size=13357) 2024-11-28T09:23:44,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742620_1796 (size=4469) 2024-11-28T09:23:45,033 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/f6983d010ba44245a4d92b6858f5a4f4 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6983d010ba44245a4d92b6858f5a4f4 2024-11-28T09:23:45,036 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/B of 
59fa39bbf7af22b65ee5a2abca8580d1 into f6983d010ba44245a4d92b6858f5a4f4(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:45,036 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:45,036 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#A#compaction#674 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:45,036 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/B, priority=12, startTime=1732785824610; duration=0sec 2024-11-28T09:23:45,036 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-28T09:23:45,036 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:B 2024-11-28T09:23:45,036 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-28T09:23:45,036 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b587d6c87e5a496a8272e9db2a2c80c6 is 175, key is test_row_0/A:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:45,037 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 62357 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-28T09:23:45,037 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1540): 59fa39bbf7af22b65ee5a2abca8580d1/C is initiating minor compaction (all files) 2024-11-28T09:23:45,037 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 59fa39bbf7af22b65ee5a2abca8580d1/C in TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
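Aside: the FLUSH operation that completed earlier in this log (HBaseAdmin$TableFuture, procId 183) was requested by the test client, while the compactions being selected here are queued by the region server itself after each flush. For reference, a minimal client-side sketch of issuing the same requests through the public Admin API; the configuration source and class name are assumptions for illustration, not taken from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactSketch {
      public static void main(String[] args) throws Exception {
        // Cluster location (ZooKeeper quorum) is assumed to come from an
        // hbase-site.xml on the classpath; nothing here is read from the log.
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Synchronous flush of the table's memstores; on the master this runs
          // a FlushTableProcedure like pid=183 seen earlier in this log.
          admin.flush(table);

          // Explicitly request a compaction. The region server also queues
          // compactions on its own once enough flush files accumulate, which is
          // what the CompactSplit/SortedCompactionPolicy messages above show.
          admin.compact(table);
        }
      }
    }
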
2024-11-28T09:23:45,037 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/deecdec0efad400582fa3133fa1e323d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd] into tmpdir=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp, totalSize=60.9 K 2024-11-28T09:23:45,037 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting deecdec0efad400582fa3133fa1e323d, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=366, earliestPutTs=1732785817485 2024-11-28T09:23:45,038 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 2ff3a8ebca3845d2bed8a72f2eda29b8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=381, earliestPutTs=1732785818133 2024-11-28T09:23:45,038 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting efe078bd778548818a2b2d3edd352acb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=405, earliestPutTs=1732785818548 2024-11-28T09:23:45,038 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d6212518b744148b117354c8daca2f9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1732785819702 2024-11-28T09:23:45,038 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] compactions.Compactor(224): Compacting 6ce8b808dd81427a9b2f880f990947cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=444, earliestPutTs=1732785820845 2024-11-28T09:23:45,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742621_1797 (size=32311) 2024-11-28T09:23:45,045 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 59fa39bbf7af22b65ee5a2abca8580d1#C#compaction#676 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-28T09:23:45,046 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/065ae2f824b84c9ab670af11889268cd is 50, key is test_row_0/C:col10/1732785820848/Put/seqid=0 2024-11-28T09:23:45,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742622_1798 (size=13323) 2024-11-28T09:23:45,443 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/b587d6c87e5a496a8272e9db2a2c80c6 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b587d6c87e5a496a8272e9db2a2c80c6 2024-11-28T09:23:45,446 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/A of 59fa39bbf7af22b65ee5a2abca8580d1 into b587d6c87e5a496a8272e9db2a2c80c6(size=31.6 K), total size for store is 31.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-28T09:23:45,446 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:45,446 INFO [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/A, priority=12, startTime=1732785824610; duration=0sec 2024-11-28T09:23:45,446 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-28T09:23:45,446 DEBUG [RS:0;363d8d38a970:33819-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:A 2024-11-28T09:23:45,451 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/065ae2f824b84c9ab670af11889268cd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/065ae2f824b84c9ab670af11889268cd 2024-11-28T09:23:45,454 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 59fa39bbf7af22b65ee5a2abca8580d1/C of 59fa39bbf7af22b65ee5a2abca8580d1 into 065ae2f824b84c9ab670af11889268cd(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-28T09:23:45,454 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 59fa39bbf7af22b65ee5a2abca8580d1:
2024-11-28T09:23:45,454 INFO [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1., storeName=59fa39bbf7af22b65ee5a2abca8580d1/C, priority=11, startTime=1732785824610; duration=0sec
2024-11-28T09:23:45,454 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-28T09:23:45,454 DEBUG [RS:0;363d8d38a970:33819-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 59fa39bbf7af22b65ee5a2abca8580d1:C
2024-11-28T09:23:51,211 DEBUG [Thread-2918 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6d9954b7 to 127.0.0.1:53251
2024-11-28T09:23:51,211 DEBUG [Thread-2918 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T09:23:51,254 DEBUG [Thread-2922 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc332d8 to 127.0.0.1:53251
2024-11-28T09:23:51,254 DEBUG [Thread-2922 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T09:23:51,271 DEBUG [Thread-2926 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1584f18a to 127.0.0.1:53251
2024-11-28T09:23:51,271 DEBUG [Thread-2926 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 161
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 7
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 152
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 25
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3821
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3759
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3730
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3915
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 3722
2024-11-28T09:23:51,271 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-28T09:23:51,271 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-28T09:23:51,271 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x17e5a47d to 127.0.0.1:53251
2024-11-28T09:23:51,271 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-28T09:23:51,272 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-28T09:23:51,272 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable TestAcidGuarantees
2024-11-28T09:23:51,273 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:51,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:51,275 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785831275"}]},"ts":"1732785831275"} 2024-11-28T09:23:51,276 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-28T09:23:51,278 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-28T09:23:51,279 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-28T09:23:51,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=187, ppid=186, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, UNASSIGN}] 2024-11-28T09:23:51,280 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=186, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, UNASSIGN 2024-11-28T09:23:51,281 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=187 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=CLOSING, regionLocation=363d8d38a970,33819,1732785660637 2024-11-28T09:23:51,282 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T09:23:51,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=188, ppid=187, state=RUNNABLE; CloseRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637}] 2024-11-28T09:23:51,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:51,433 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 363d8d38a970,33819,1732785660637 2024-11-28T09:23:51,433 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] handler.UnassignRegionHandler(124): Close 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:51,433 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1681): Closing 59fa39bbf7af22b65ee5a2abca8580d1, disabling compactions & flushes 2024-11-28T09:23:51,434 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
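Aside: the "Started disable of TestAcidGuarantees" / DisableTableProcedure records above correspond to a plain Admin call on the client side. A minimal sketch of that call, assuming the same classpath-provided cluster configuration as in the earlier sketch; the guard and the printed status line are illustrative, not part of the test tool:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          if (!admin.isTableDisabled(table)) {
            // Blocks until the master's DisableTableProcedure (pid=185 above),
            // including its region unassign/close subprocedures, has finished.
            admin.disableTable(table);
          }
          System.out.println("TestAcidGuarantees disabled: " + admin.isTableDisabled(table));
        }
      }
    }
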
2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. after waiting 0 ms 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 2024-11-28T09:23:51,434 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(2837): Flushing 59fa39bbf7af22b65ee5a2abca8580d1 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=A 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=B 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 59fa39bbf7af22b65ee5a2abca8580d1, store=C 2024-11-28T09:23:51,434 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-28T09:23:51,438 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f26eb4093c7049b9a2cd3ccbfce64c7f_59fa39bbf7af22b65ee5a2abca8580d1 is 50, key is test_row_1/A:col10/1732785831253/Put/seqid=0 2024-11-28T09:23:51,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742623_1799 (size=9914) 2024-11-28T09:23:51,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:51,842 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T09:23:51,845 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128f26eb4093c7049b9a2cd3ccbfce64c7f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f26eb4093c7049b9a2cd3ccbfce64c7f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:51,845 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c9e937b6c587471785744d768f5a0293, store: [table=TestAcidGuarantees family=A region=59fa39bbf7af22b65ee5a2abca8580d1] 2024-11-28T09:23:51,846 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c9e937b6c587471785744d768f5a0293 is 175, key is test_row_1/A:col10/1732785831253/Put/seqid=0 2024-11-28T09:23:51,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742624_1800 (size=22561) 2024-11-28T09:23:51,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:52,250 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=454, memsize=8.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c9e937b6c587471785744d768f5a0293 2024-11-28T09:23:52,255 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/439e7011a930423493b868e76f8e8f07 is 50, key is test_row_1/B:col10/1732785831253/Put/seqid=0 2024-11-28T09:23:52,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742625_1801 (size=9857) 2024-11-28T09:23:52,269 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/439e7011a930423493b868e76f8e8f07 2024-11-28T09:23:52,276 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/131c28b6b4d74c218a1de4bffbab3085 is 50, key is test_row_1/C:col10/1732785831253/Put/seqid=0 
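Aside: the mobdir/.tmp to mobdir/data renames and the HMobStore / DefaultMobStoreFlusher records above appear because column family A is MOB-enabled (only the A store goes through the MOB flush path in these records), so qualifying cells are written into separate MOB files. The descriptor actually used by the test is not visible in this section; the sketch below only illustrates how such a family could be declared, and the threshold value is an assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");

        // Family "A" stores cells above the MOB threshold in mobdir/ files; the
        // 10-byte threshold is only chosen to force the MOB path for small test
        // values and is not taken from the test tool's configuration.
        TableDescriptorBuilder builder = TableDescriptorBuilder.newBuilder(table)
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMobEnabled(true)
                .setMobThreshold(10)
                .build())
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("B"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("C"));

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          admin.createTable(builder.build());
        }
      }
    }
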
2024-11-28T09:23:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742626_1802 (size=9857) 2024-11-28T09:23:52,293 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=454 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/131c28b6b4d74c218a1de4bffbab3085 2024-11-28T09:23:52,296 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/A/c9e937b6c587471785744d768f5a0293 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c9e937b6c587471785744d768f5a0293 2024-11-28T09:23:52,299 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c9e937b6c587471785744d768f5a0293, entries=100, sequenceid=454, filesize=22.0 K 2024-11-28T09:23:52,299 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/B/439e7011a930423493b868e76f8e8f07 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/439e7011a930423493b868e76f8e8f07 2024-11-28T09:23:52,302 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/439e7011a930423493b868e76f8e8f07, entries=100, sequenceid=454, filesize=9.6 K 2024-11-28T09:23:52,302 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/.tmp/C/131c28b6b4d74c218a1de4bffbab3085 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/131c28b6b4d74c218a1de4bffbab3085 2024-11-28T09:23:52,306 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/131c28b6b4d74c218a1de4bffbab3085, entries=100, sequenceid=454, filesize=9.6 K 2024-11-28T09:23:52,307 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(3040): Finished 
flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 59fa39bbf7af22b65ee5a2abca8580d1 in 873ms, sequenceid=454, compaction requested=false 2024-11-28T09:23:52,308 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95] to archive 2024-11-28T09:23:52,312 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:52,313 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/53474051c55544009d772b9412fbe473 2024-11-28T09:23:52,316 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/1e75fc971808471788836ca82218a1e4 2024-11-28T09:23:52,317 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b52037ab39274300b57d25edb9a25ac4 2024-11-28T09:23:52,320 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/3d30b4c8a67447849390082449be6957 2024-11-28T09:23:52,321 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bad8f58fd48c44bb815b0a192e06ecde 2024-11-28T09:23:52,323 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/03ace9dbe25f4a3288c47d85b2d41260 2024-11-28T09:23:52,324 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/31e7439af3024478bcb404c72f2ec291 2024-11-28T09:23:52,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/cc306f1fc84745579266269d599d26df 2024-11-28T09:23:52,329 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc8db60454b54dd196f1013e722762e9 2024-11-28T09:23:52,331 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/68365344f4ce446fba3eda611768d7a2 2024-11-28T09:23:52,336 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/fc29ca6f368d4d7f95c862f27b5d2813 2024-11-28T09:23:52,337 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/92d685df025b4f48ad785f7d47651752 2024-11-28T09:23:52,338 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc2a6c6c0ad942f5a0b72bb6c823af51 2024-11-28T09:23:52,340 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/40fa901fe07c4e9da006d64742f15498 2024-11-28T09:23:52,341 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/5aa2c4e274b44ceea874897ea61ed9b7 2024-11-28T09:23:52,342 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/2b01caae08c847e99ca6f330462da3b4 2024-11-28T09:23:52,343 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/0ae7aa7f13be4f3a8745a16976272ecb 2024-11-28T09:23:52,348 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/26891d71e15d4757bb94d1b73eb9ebc4 2024-11-28T09:23:52,349 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/dc36ee46f2b14c048ded9069db6d4ee3 2024-11-28T09:23:52,350 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/01cf38d0e4d64ecf87f9cad82b32398b 2024-11-28T09:23:52,351 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/bb2af9a6bfad448aa32d5074a34f029e 2024-11-28T09:23:52,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/573f5c378130441cafea97ea84953bf4 2024-11-28T09:23:52,352 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/7c811cc370dc4492aa383067529be2aa 2024-11-28T09:23:52,353 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/80e1db10708941f38003b0d9844c6365 2024-11-28T09:23:52,354 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c50ed4d76d294b03a5882093e6a7f321 2024-11-28T09:23:52,356 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c4cf759f9aca49178e72ccdee70c608c 2024-11-28T09:23:52,357 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/d3e6e3264914495194da1efd8f5b680a 2024-11-28T09:23:52,358 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/43009b1219b04e58bc21b57ef339be50 2024-11-28T09:23:52,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/85c57e9e71424766bb396a873f198aae 2024-11-28T09:23:52,359 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/243f437bc98544a8a41bfce847a9fd77 2024-11-28T09:23:52,360 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/e2146ffaa3404c0e8067ee11cace9d95 2024-11-28T09:23:52,362 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/be4d0be1cf1944f7b84194034d58979b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6bce462935740fdadfba653721d1cdb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/664acb26643348438b65f43d30cc7b79, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bb76a3166b847f3afb939584900d0b6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a70c4912253043c594885211df95f68b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/19283596fdab48c3b50004d7f0c1ad66, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/67f82bf2a8b348aaba32836a5a610c66, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6fe20ee1edaf4d2bb9029e5a34d896ed, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66] to archive 2024-11-28T09:23:52,363 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
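Every archive entry above and below follows one pattern: when a store closes, HFileArchiver does not delete compacted store files, it moves each one from the region's data directory to the same relative location under the cluster's archive root (data/default/TestAcidGuarantees/... becomes archive/data/default/TestAcidGuarantees/...). The following is a minimal sketch of that path-mirroring move using the Hadoop FileSystem API; the class, helper names, and root paths are assumptions for illustration only, not HBase's actual HFileArchiver code.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative sketch only: mirrors a store file's relative path from the
// data root to the archive root and moves it there with a rename, which is
// the data/ -> archive/ layout the log entries record. Not HBase source code.
public class ArchiveMoveSketch {

    // Moves one store file, e.g. relative =
    // "default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96".
    static void archiveStoreFile(FileSystem fs, Path dataRoot, Path archiveRoot,
                                 String relative) throws IOException {
        Path src = new Path(dataRoot, relative);
        Path dst = new Path(archiveRoot, relative);
        fs.mkdirs(dst.getParent());            // make sure the archive family dir exists
        if (!fs.rename(src, dst)) {            // on HDFS a rename is a metadata-only move
            throw new IOException("Failed to archive " + src + " to " + dst);
        }
    }

    // The per-family batch seen in the log: the store closer hands over the
    // whole list of compacted files and each one is archived individually.
    static void archiveCompactedFiles(FileSystem fs, Path dataRoot, Path archiveRoot,
                                      List<String> relativePaths) throws IOException {
        for (String relative : relativePaths) {
            archiveStoreFile(fs, dataRoot, archiveRoot, relative);
        }
    }
}

Archiving by move rather than delete keeps the files available for anything that may still reference them (snapshots, backups); a separate cleaner chore is what eventually removes them from the archive directory.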
2024-11-28T09:23:52,364 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2e4841ab3b02425cbbb50d1feffd4a96 2024-11-28T09:23:52,365 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1d1e31b6609d4949ab5cbeb25abc8013 2024-11-28T09:23:52,366 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/be4d0be1cf1944f7b84194034d58979b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/be4d0be1cf1944f7b84194034d58979b 2024-11-28T09:23:52,367 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/078a0d2f06ae41f9aba67aaa30764bbf 2024-11-28T09:23:52,371 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/15123368771e4bc5b2ec278e3935dbb8 2024-11-28T09:23:52,375 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/77b8cab9f0b54c9ab54c8137a4ee99c8 2024-11-28T09:23:52,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:52,381 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6bce462935740fdadfba653721d1cdb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6bce462935740fdadfba653721d1cdb 2024-11-28T09:23:52,383 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6c78e266ca3147859289726cc22122f4 2024-11-28T09:23:52,384 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bf32f3fca90430392061ebbde956d9b 2024-11-28T09:23:52,385 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/664acb26643348438b65f43d30cc7b79 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/664acb26643348438b65f43d30cc7b79 2024-11-28T09:23:52,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/854879c7ce774d96aa9bb180478628bb 2024-11-28T09:23:52,387 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f0e54a1cd5294af886768c799968d784 2024-11-28T09:23:52,389 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f9e7a1f9c7a64e42a3ca85531da73066 2024-11-28T09:23:52,390 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bb76a3166b847f3afb939584900d0b6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/8bb76a3166b847f3afb939584900d0b6 2024-11-28T09:23:52,393 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6f936f5cfe574c399c377a68a5d3e6df 2024-11-28T09:23:52,394 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/db06100243ce4785afe2e70b55b7aeea 2024-11-28T09:23:52,395 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a70c4912253043c594885211df95f68b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a70c4912253043c594885211df95f68b 2024-11-28T09:23:52,398 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6e74bf8e645b4c3f989cf609c3b3dad5 2024-11-28T09:23:52,399 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/e399338301ca4f048809e395c5900425 2024-11-28T09:23:52,400 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/19283596fdab48c3b50004d7f0c1ad66 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/19283596fdab48c3b50004d7f0c1ad66 2024-11-28T09:23:52,404 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/bcd07dfcb8f5401796abf31446a2ce36 2024-11-28T09:23:52,405 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/5484b2e3bfca4576a422c2e7e2bb9149 2024-11-28T09:23:52,406 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/1c78efa04d7f4ec1a45ee1e1c67e5185 2024-11-28T09:23:52,407 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/67f82bf2a8b348aaba32836a5a610c66 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/67f82bf2a8b348aaba32836a5a610c66 2024-11-28T09:23:52,408 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/3472e918b0344c27ab589decfc2cd3a6 2024-11-28T09:23:52,410 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0855790d6c304cf8b92256c04af28b59 2024-11-28T09:23:52,411 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6fe20ee1edaf4d2bb9029e5a34d896ed to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/6fe20ee1edaf4d2bb9029e5a34d896ed 2024-11-28T09:23:52,412 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/d2f3c92e85da4f0f899882685d69fc5b 2024-11-28T09:23:52,413 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/a8a3a30828cb42d585cc68c93c145bf1 2024-11-28T09:23:52,414 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/2bd459e3dccf446ea8906fbd80ccf228 2024-11-28T09:23:52,415 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/0bfe8861dc194f9e99ea8ad6a9d5ed66 2024-11-28T09:23:52,417 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a9be6cbe4a21488fa508b017639b78bd, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/58501be2adff41b29537eb92a4d4e080, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/fb80aa85f98443bd97e892c89ef489d0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/9bb5cf2dfca04680b160eae70f374cd8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/8cfdb1727e784476a7239ca2e3f38836, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a6d73812c3ac4ae586f28fd8f50b806a, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/deecdec0efad400582fa3133fa1e323d, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd] to archive 2024-11-28T09:23:52,418 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-28T09:23:52,419 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a66bf0d88eb84a099073de78f0a9a4b1 2024-11-28T09:23:52,420 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/262c44a0a7404bf3b4dae265083b33e1 2024-11-28T09:23:52,421 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a9be6cbe4a21488fa508b017639b78bd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a9be6cbe4a21488fa508b017639b78bd 2024-11-28T09:23:52,422 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/18ba966a00014453bcff87bb77841ad2 2024-11-28T09:23:52,423 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a5b5bdd67e23430889655c7c7398313e 2024-11-28T09:23:52,425 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7b76ede38c914ddabae4c4bb65e893e0 2024-11-28T09:23:52,426 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/58501be2adff41b29537eb92a4d4e080 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/58501be2adff41b29537eb92a4d4e080 2024-11-28T09:23:52,427 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/060ac75e7bff42c6b6b1087fea8d39a2 2024-11-28T09:23:52,428 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2160468e218f42c6bdf93d0ff1e45ee4 2024-11-28T09:23:52,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/fb80aa85f98443bd97e892c89ef489d0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/fb80aa85f98443bd97e892c89ef489d0 2024-11-28T09:23:52,429 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/c8ee2fd6158d4f4196703b83a5a758b6 2024-11-28T09:23:52,430 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/372184545bd848f2b75343a108a9c845 2024-11-28T09:23:52,431 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a01c8e96fd384b919f73697e1d4fa281 2024-11-28T09:23:52,432 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/9bb5cf2dfca04680b160eae70f374cd8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/9bb5cf2dfca04680b160eae70f374cd8 2024-11-28T09:23:52,433 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/858ff778ec174426ab09f0cf756ea8b0 2024-11-28T09:23:52,434 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/95606fc0b5a74e479f76905ac6bc9d77 2024-11-28T09:23:52,435 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/8cfdb1727e784476a7239ca2e3f38836 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/8cfdb1727e784476a7239ca2e3f38836 2024-11-28T09:23:52,436 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/dcbd627ec4224641844fe33768bc2051 2024-11-28T09:23:52,437 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/29966b01de034a9997e3b8791b3f5a94 2024-11-28T09:23:52,437 DEBUG 
[StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a6d73812c3ac4ae586f28fd8f50b806a to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/a6d73812c3ac4ae586f28fd8f50b806a 2024-11-28T09:23:52,438 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/7cfe7782dbe741c59b8648e551d6ecb0 2024-11-28T09:23:52,439 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/33b65e3b8081438d9cbb9f4a77b06d5c 2024-11-28T09:23:52,440 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/1393c59b11cc4b658a80c2375943529d 2024-11-28T09:23:52,441 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ec0ae66f96040a9831bf97dce8efb86 2024-11-28T09:23:52,442 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/deecdec0efad400582fa3133fa1e323d to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/deecdec0efad400582fa3133fa1e323d 2024-11-28T09:23:52,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/d091f3d3598d474d81c752540b359629 2024-11-28T09:23:52,443 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/2ff3a8ebca3845d2bed8a72f2eda29b8 2024-11-28T09:23:52,445 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/efe078bd778548818a2b2d3edd352acb 2024-11-28T09:23:52,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/3d6212518b744148b117354c8daca2f9 2024-11-28T09:23:52,446 DEBUG [StoreCloser-TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/6ce8b808dd81427a9b2f880f990947cd 2024-11-28T09:23:52,450 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits/457.seqid, newMaxSeqId=457, maxSeqId=4 2024-11-28T09:23:52,450 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1. 
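Once all three families (A, B, C) are archived, the close path writes a marker file under recovered.edits (457.seqid above) whose name records the region's new maximum sequence id, and only then is the region reported closed. Below is a hedged sketch of reading that naming convention back, purely for illustration: it uses local java.nio paths instead of HDFS and is not HBase's WALSplitUtil code.

import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative sketch only: parses the "<seqid>.seqid" marker name that the
// close path wrote as recovered.edits/457.seqid above.
public class SeqIdMarkerSketch {

    static long maxSeqIdFromMarker(Path marker) {
        String name = marker.getFileName().toString();            // e.g. "457.seqid"
        if (!name.endsWith(".seqid")) {
            throw new IllegalArgumentException("Not a seqid marker: " + name);
        }
        return Long.parseLong(name.substring(0, name.length() - ".seqid".length()));
    }

    public static void main(String[] args) {
        Path marker = Paths.get("recovered.edits", "457.seqid");   // name taken from the log
        System.out.println(maxSeqIdFromMarker(marker));            // prints 457
    }
}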
2024-11-28T09:23:52,450 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] regionserver.HRegion(1635): Region close journal for 59fa39bbf7af22b65ee5a2abca8580d1: 2024-11-28T09:23:52,452 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION, pid=188}] handler.UnassignRegionHandler(170): Closed 59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:52,452 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=187 updating hbase:meta row=59fa39bbf7af22b65ee5a2abca8580d1, regionState=CLOSED 2024-11-28T09:23:52,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=188, resume processing ppid=187 2024-11-28T09:23:52,455 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, ppid=187, state=SUCCESS; CloseRegionProcedure 59fa39bbf7af22b65ee5a2abca8580d1, server=363d8d38a970,33819,1732785660637 in 1.1720 sec 2024-11-28T09:23:52,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=187, resume processing ppid=186 2024-11-28T09:23:52,457 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=186, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=59fa39bbf7af22b65ee5a2abca8580d1, UNASSIGN in 1.1750 sec 2024-11-28T09:23:52,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=186, resume processing ppid=185 2024-11-28T09:23:52,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1780 sec 2024-11-28T09:23:52,460 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732785832460"}]},"ts":"1732785832460"} 2024-11-28T09:23:52,461 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-28T09:23:52,463 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-28T09:23:52,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.1910 sec 2024-11-28T09:23:53,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-11-28T09:23:53,379 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 185 completed 2024-11-28T09:23:53,380 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete TestAcidGuarantees 2024-11-28T09:23:53,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] procedure2.ProcedureExecutor(1098): Stored pid=189, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,381 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=189, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,382 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=189, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,382 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-28T09:23:53,384 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,387 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C, FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits] 2024-11-28T09:23:53,391 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b587d6c87e5a496a8272e9db2a2c80c6 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/b587d6c87e5a496a8272e9db2a2c80c6 2024-11-28T09:23:53,395 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c9e937b6c587471785744d768f5a0293 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/A/c9e937b6c587471785744d768f5a0293 2024-11-28T09:23:53,397 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/439e7011a930423493b868e76f8e8f07 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/439e7011a930423493b868e76f8e8f07 2024-11-28T09:23:53,399 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6983d010ba44245a4d92b6858f5a4f4 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/B/f6983d010ba44245a4d92b6858f5a4f4 2024-11-28T09:23:53,403 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/065ae2f824b84c9ab670af11889268cd to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/065ae2f824b84c9ab670af11889268cd 
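The entries above and below are the server side of a plain client sequence: the test disables TestAcidGuarantees (DisableTableProcedure, pid=185, polled by the repeated "Checking to see if procedure is done" entries) and then asks the master to delete it (DeleteTableProcedure, pid=189), which archives the remaining region files, the recovered.edits marker, and the MOB files that follow. The client side is just the standard HBase Admin API; a minimal sketch, assuming a reachable cluster configuration on the classpath:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Client-side counterpart of the DISABLE/DELETE procedures in this log.
// The connection details are assumptions; the Admin calls are the standard
// HBase client API, and each blocks until the master procedure finishes,
// which is what the "Checking to see if procedure is done" polling reflects.
public class DisableAndDeleteTable {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("TestAcidGuarantees");
            admin.disableTable(table);   // DisableTableProcedure (pid=185 above)
            admin.deleteTable(table);    // DeleteTableProcedure  (pid=189 above)
        }
    }
}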
2024-11-28T09:23:53,404 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/131c28b6b4d74c218a1de4bffbab3085 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/C/131c28b6b4d74c218a1de4bffbab3085 2024-11-28T09:23:53,411 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits/457.seqid to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1/recovered.edits/457.seqid 2024-11-28T09:23:53,412 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/default/TestAcidGuarantees/59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,412 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-28T09:23:53,416 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:23:53,417 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-28T09:23:53,422 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128025330d4c6c949469b8b4b0a724bf8ae_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128025330d4c6c949469b8b4b0a724bf8ae_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,425 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807bd94191b3e496191eae70369ed5454_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112807bd94191b3e496191eae70369ed5454_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,426 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d69b2ca11cd4eddb4dbd63522336baf_59fa39bbf7af22b65ee5a2abca8580d1 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411280d69b2ca11cd4eddb4dbd63522336baf_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,428 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281b55da8ca8804aa181022405d1857f3b_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411281b55da8ca8804aa181022405d1857f3b_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,429 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128223391f33b864c2b839bbd459bc20655_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128223391f33b864c2b839bbd459bc20655_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,431 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282c7aa5b0711240f98504ecf8747ca936_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411282c7aa5b0711240f98504ecf8747ca936_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,432 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128393bbb86ad1d4de899326e9fe942df16_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128393bbb86ad1d4de899326e9fe942df16_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,433 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283c051ab2c3ac42479aef4ddc9878622d_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411283c051ab2c3ac42479aef4ddc9878622d_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,436 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112840220e2af2af4777ba0ebcafecd6e10f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112840220e2af2af4777ba0ebcafecd6e10f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,437 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285581e5bdfac9487abcba579ab0a2804f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411285581e5bdfac9487abcba579ab0a2804f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,438 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128580b58b8c22c4ef3bfda90e6a8da6be8_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128580b58b8c22c4ef3bfda90e6a8da6be8_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,439 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f329a63615b4f1881c9df9938f23e68_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f329a63615b4f1881c9df9938f23e68_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,440 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f93b7296b194ef09eb1c76d77929a91_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411286f93b7296b194ef09eb1c76d77929a91_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,443 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112872eb69f9a4ca462e8c27f8fe93b8a7be_59fa39bbf7af22b65ee5a2abca8580d1 to 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024112872eb69f9a4ca462e8c27f8fe93b8a7be_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,444 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287daee126c81344919ab9473d4b6704ac_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411287daee126c81344919ab9473d4b6704ac_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,445 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288ba37554f1814361810356f602c6b8fb_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411288ba37554f1814361810356f602c6b8fb_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,446 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ef188cd6a764dc8bee6591396d9865b_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411289ef188cd6a764dc8bee6591396d9865b_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,447 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128aac094fb5a304c45b306a09813bfb067_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128aac094fb5a304c45b306a09813bfb067_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,450 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c7c53c3be9f64eb1a749d8e022518473_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128c7c53c3be9f64eb1a749d8e022518473_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,451 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ceda8f08e2c140c1846ed11ba8fb146e_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128ceda8f08e2c140c1846ed11ba8fb146e_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,452 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128de025fbb729f400ea85b2d6ba5295f49_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128de025fbb729f400ea85b2d6ba5295f49_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,453 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128eccb719cf25e4e99a62fc2564f215011_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128eccb719cf25e4e99a62fc2564f215011_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,454 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f26eb4093c7049b9a2cd3ccbfce64c7f_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128f26eb4093c7049b9a2cd3ccbfce64c7f_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,456 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128feb8e49ac20340b981d3779eec8eb3eb_59fa39bbf7af22b65ee5a2abca8580d1 to hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241128feb8e49ac20340b981d3779eec8eb3eb_59fa39bbf7af22b65ee5a2abca8580d1 2024-11-28T09:23:53,458 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-28T09:23:53,460 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=189, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,464 WARN 
[PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-28T09:23:53,465 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-28T09:23:53,466 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=189, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,466 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-28T09:23:53,466 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732785833466"}]},"ts":"9223372036854775807"} 2024-11-28T09:23:53,469 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-28T09:23:53,469 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 59fa39bbf7af22b65ee5a2abca8580d1, NAME => 'TestAcidGuarantees,,1732785799242.59fa39bbf7af22b65ee5a2abca8580d1.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T09:23:53,469 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-28T09:23:53,469 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732785833469"}]},"ts":"9223372036854775807"} 2024-11-28T09:23:53,473 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-28T09:23:53,475 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=189, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-28T09:23:53,477 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 95 msec 2024-11-28T09:23:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34825 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=189 2024-11-28T09:23:53,483 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 189 completed 2024-11-28T09:23:53,494 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=237 (was 241), OpenFileDescriptor=451 (was 458), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=633 (was 685), ProcessCount=11 (was 11), AvailableMemoryMB=4062 (was 4168) 2024-11-28T09:23:53,494 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-28T09:23:53,494 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-28T09:23:53,494 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3771e354 to 127.0.0.1:53251 2024-11-28T09:23:53,494 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:53,495 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-28T09:23:53,495 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=479130374, stopped=false 2024-11-28T09:23:53,495 INFO 
[Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=363d8d38a970,34825,1732785659868 2024-11-28T09:23:53,497 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-28T09:23:53,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T09:23:53,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:23:53,497 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T09:23:53,498 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:23:53,498 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:53,498 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T09:23:53,499 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T09:23:53,500 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '363d8d38a970,33819,1732785660637' ***** 2024-11-28T09:23:53,500 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-28T09:23:53,500 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T09:23:53,501 INFO [RS:0;363d8d38a970:33819 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T09:23:53,501 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-28T09:23:53,501 INFO [RS:0;363d8d38a970:33819 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T09:23:53,501 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(3579): Received CLOSE for 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:23:53,502 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1224): stopping server 363d8d38a970,33819,1732785660637 2024-11-28T09:23:53,502 DEBUG [RS:0;363d8d38a970:33819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:53,502 INFO [RS:0;363d8d38a970:33819 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T09:23:53,502 INFO [RS:0;363d8d38a970:33819 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T09:23:53,502 INFO [RS:0;363d8d38a970:33819 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-28T09:23:53,502 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-28T09:23:53,502 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 4cf15397e80ca5505a26ba84c5dd3d7f, disabling compactions & flushes 2024-11-28T09:23:53,502 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:23:53,502 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:23:53,502 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. after waiting 0 ms 2024-11-28T09:23:53,502 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:23:53,502 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 4cf15397e80ca5505a26ba84c5dd3d7f 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-28T09:23:53,504 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-28T09:23:53,504 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1603): Online Regions={4cf15397e80ca5505a26ba84c5dd3d7f=hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f., 1588230740=hbase:meta,,1.1588230740} 2024-11-28T09:23:53,504 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-28T09:23:53,504 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-28T09:23:53,504 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-28T09:23:53,504 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T09:23:53,504 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T09:23:53,505 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-28T09:23:53,505 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:23:53,537 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/.tmp/info/70afde4cec31458f8adf97768f99b833 is 45, key is 
default/info:d/1732785665252/Put/seqid=0 2024-11-28T09:23:53,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742627_1803 (size=5037) 2024-11-28T09:23:53,548 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/info/7312dea6e4cf44628885a9d5a1fffe15 is 143, key is hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f./info:regioninfo/1732785665125/Put/seqid=0 2024-11-28T09:23:53,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742628_1804 (size=7725) 2024-11-28T09:23:53,553 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/info/7312dea6e4cf44628885a9d5a1fffe15 2024-11-28T09:23:53,575 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/rep_barrier/9c1d4bb5e7724d309297a2b173daab6e is 102, key is TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff./rep_barrier:/1732785691312/DeleteFamily/seqid=0 2024-11-28T09:23:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742629_1805 (size=6025) 2024-11-28T09:23:53,596 INFO [regionserver/363d8d38a970:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T09:23:53,620 INFO [regionserver/363d8d38a970:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-28T09:23:53,620 INFO [regionserver/363d8d38a970:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-28T09:23:53,705 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:23:53,906 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 4cf15397e80ca5505a26ba84c5dd3d7f 2024-11-28T09:23:53,948 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/.tmp/info/70afde4cec31458f8adf97768f99b833 2024-11-28T09:23:53,956 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/.tmp/info/70afde4cec31458f8adf97768f99b833 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/info/70afde4cec31458f8adf97768f99b833 2024-11-28T09:23:53,963 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/info/70afde4cec31458f8adf97768f99b833, entries=2, sequenceid=6, filesize=4.9 K 2024-11-28T09:23:53,964 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 4cf15397e80ca5505a26ba84c5dd3d7f in 462ms, sequenceid=6, compaction requested=false 2024-11-28T09:23:53,986 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/rep_barrier/9c1d4bb5e7724d309297a2b173daab6e 2024-11-28T09:23:54,033 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/namespace/4cf15397e80ca5505a26ba84c5dd3d7f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T09:23:54,034 INFO [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 2024-11-28T09:23:54,034 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 4cf15397e80ca5505a26ba84c5dd3d7f: 2024-11-28T09:23:54,034 DEBUG [RS_CLOSE_REGION-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1732785663892.4cf15397e80ca5505a26ba84c5dd3d7f. 
2024-11-28T09:23:54,052 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/table/8c94a2607f3d440eb25e981f3accb091 is 96, key is TestAcidGuarantees,,1732785665473.9324112e51bee406916a385aca28ddff./table:/1732785691312/DeleteFamily/seqid=0 2024-11-28T09:23:54,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742630_1806 (size=5942) 2024-11-28T09:23:54,095 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/table/8c94a2607f3d440eb25e981f3accb091 2024-11-28T09:23:54,103 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/info/7312dea6e4cf44628885a9d5a1fffe15 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/info/7312dea6e4cf44628885a9d5a1fffe15 2024-11-28T09:23:54,106 DEBUG [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-28T09:23:54,107 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/info/7312dea6e4cf44628885a9d5a1fffe15, entries=22, sequenceid=93, filesize=7.5 K 2024-11-28T09:23:54,108 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/rep_barrier/9c1d4bb5e7724d309297a2b173daab6e as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/rep_barrier/9c1d4bb5e7724d309297a2b173daab6e 2024-11-28T09:23:54,113 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/rep_barrier/9c1d4bb5e7724d309297a2b173daab6e, entries=6, sequenceid=93, filesize=5.9 K 2024-11-28T09:23:54,114 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/.tmp/table/8c94a2607f3d440eb25e981f3accb091 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/table/8c94a2607f3d440eb25e981f3accb091 2024-11-28T09:23:54,120 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/table/8c94a2607f3d440eb25e981f3accb091, entries=9, sequenceid=93, filesize=5.8 K 2024-11-28T09:23:54,123 
INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 618ms, sequenceid=93, compaction requested=false 2024-11-28T09:23:54,140 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-28T09:23:54,141 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T09:23:54,141 INFO [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-28T09:23:54,141 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-28T09:23:54,141 DEBUG [RS_CLOSE_META-regionserver/363d8d38a970:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-28T09:23:54,306 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1250): stopping server 363d8d38a970,33819,1732785660637; all regions closed. 2024-11-28T09:23:54,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741834_1010 (size=26050) 2024-11-28T09:23:54,321 DEBUG [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/oldWALs 2024-11-28T09:23:54,321 INFO [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 363d8d38a970%2C33819%2C1732785660637.meta:.meta(num 1732785663627) 2024-11-28T09:23:54,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741832_1008 (size=18464604) 2024-11-28T09:23:54,346 DEBUG [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/oldWALs 2024-11-28T09:23:54,346 INFO [RS:0;363d8d38a970:33819 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 363d8d38a970%2C33819%2C1732785660637:(num 1732785662747) 2024-11-28T09:23:54,346 DEBUG [RS:0;363d8d38a970:33819 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:54,346 INFO [RS:0;363d8d38a970:33819 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T09:23:54,347 INFO [RS:0;363d8d38a970:33819 {}] hbase.ChoreService(370): Chore service for: regionserver/363d8d38a970:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-28T09:23:54,348 INFO [RS:0;363d8d38a970:33819 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33819 2024-11-28T09:23:54,350 INFO [regionserver/363d8d38a970:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-28T09:23:54,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/363d8d38a970,33819,1732785660637 2024-11-28T09:23:54,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T09:23:54,355 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$359/0x00007fb4348f1d48@7c4e7960 rejected from java.util.concurrent.ThreadPoolExecutor@394ce88d[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 15] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-28T09:23:54,358 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [363d8d38a970,33819,1732785660637] 2024-11-28T09:23:54,358 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 363d8d38a970,33819,1732785660637; numProcessing=1 2024-11-28T09:23:54,359 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/363d8d38a970,33819,1732785660637 already deleted, retry=false 2024-11-28T09:23:54,359 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 363d8d38a970,33819,1732785660637 expired; onlineServers=0 2024-11-28T09:23:54,359 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '363d8d38a970,34825,1732785659868' ***** 2024-11-28T09:23:54,359 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-28T09:23:54,360 DEBUG [M:0;363d8d38a970:34825 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6132fdd7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=363d8d38a970/172.17.0.2:0 2024-11-28T09:23:54,360 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegionServer(1224): stopping server 363d8d38a970,34825,1732785659868 2024-11-28T09:23:54,360 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegionServer(1250): stopping server 363d8d38a970,34825,1732785659868; all regions closed. 
2024-11-28T09:23:54,360 DEBUG [M:0;363d8d38a970:34825 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T09:23:54,360 DEBUG [M:0;363d8d38a970:34825 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-28T09:23:54,360 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-28T09:23:54,360 DEBUG [M:0;363d8d38a970:34825 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-28T09:23:54,360 DEBUG [master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.small.0-1732785662389 {}] cleaner.HFileCleaner(306): Exit Thread[master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.small.0-1732785662389,5,FailOnTimeoutGroup] 2024-11-28T09:23:54,360 DEBUG [master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.large.0-1732785662384 {}] cleaner.HFileCleaner(306): Exit Thread[master/363d8d38a970:0:becomeActiveMaster-HFileCleaner.large.0-1732785662384,5,FailOnTimeoutGroup] 2024-11-28T09:23:54,360 INFO [M:0;363d8d38a970:34825 {}] hbase.ChoreService(370): Chore service for: master/363d8d38a970:0 had [] on shutdown 2024-11-28T09:23:54,361 DEBUG [M:0;363d8d38a970:34825 {}] master.HMaster(1733): Stopping service threads 2024-11-28T09:23:54,361 INFO [M:0;363d8d38a970:34825 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-28T09:23:54,361 ERROR [M:0;363d8d38a970:34825 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Client (1308410445) connection to localhost/127.0.0.1:33549 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:33549,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-28T09:23:54,362 INFO [M:0;363d8d38a970:34825 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-28T09:23:54,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-28T09:23:54,362 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T09:23:54,363 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-28T09:23:54,364 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T09:23:54,365 DEBUG [M:0;363d8d38a970:34825 {}] zookeeper.ZKUtil(347): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-28T09:23:54,365 WARN [M:0;363d8d38a970:34825 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-28T09:23:54,365 INFO [M:0;363d8d38a970:34825 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-28T09:23:54,365 INFO [M:0;363d8d38a970:34825 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-28T09:23:54,365 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T09:23:54,365 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:23:54,365 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:23:54,365 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T09:23:54,365 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:23:54,365 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.36 KB heapSize=992.80 KB 2024-11-28T09:23:54,417 DEBUG [M:0;363d8d38a970:34825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/059935d9f9744f98a51865585601c1dd is 82, key is hbase:meta,,1/info:regioninfo/1732785663774/Put/seqid=0 2024-11-28T09:23:54,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742631_1807 (size=5672) 2024-11-28T09:23:54,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T09:23:54,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33819-0x1003d6e958f0001, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T09:23:54,458 INFO [RS:0;363d8d38a970:33819 {}] regionserver.HRegionServer(1307): Exiting; stopping=363d8d38a970,33819,1732785660637; zookeeper connection closed. 
2024-11-28T09:23:54,459 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3aa8ad02 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3aa8ad02 2024-11-28T09:23:54,460 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-28T09:23:54,842 INFO [M:0;363d8d38a970:34825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2311 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/059935d9f9744f98a51865585601c1dd 2024-11-28T09:23:54,876 DEBUG [M:0;363d8d38a970:34825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34f0ef1792474e05981951b632414e47 is 2279, key is \x00\x00\x00\x00\x00\x00\x00\x9E/proc:d/1732785800887/Put/seqid=0 2024-11-28T09:23:54,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742632_1808 (size=46831) 2024-11-28T09:23:54,892 INFO [M:0;363d8d38a970:34825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=804.81 KB at sequenceid=2311 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34f0ef1792474e05981951b632414e47 2024-11-28T09:23:54,895 INFO [M:0;363d8d38a970:34825 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34f0ef1792474e05981951b632414e47 2024-11-28T09:23:54,928 DEBUG [M:0;363d8d38a970:34825 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8413b6efd5744a98814daeaf12520bb is 69, key is 363d8d38a970,33819,1732785660637/rs:state/1732785662530/Put/seqid=0 2024-11-28T09:23:54,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073742633_1809 (size=5156) 2024-11-28T09:23:54,958 INFO [M:0;363d8d38a970:34825 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2311 (bloomFilter=true), to=hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8413b6efd5744a98814daeaf12520bb 2024-11-28T09:23:54,968 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/059935d9f9744f98a51865585601c1dd as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/059935d9f9744f98a51865585601c1dd 2024-11-28T09:23:54,971 INFO [M:0;363d8d38a970:34825 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/059935d9f9744f98a51865585601c1dd, entries=8, sequenceid=2311, filesize=5.5 K 
2024-11-28T09:23:54,973 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/34f0ef1792474e05981951b632414e47 as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34f0ef1792474e05981951b632414e47 2024-11-28T09:23:54,977 INFO [M:0;363d8d38a970:34825 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 34f0ef1792474e05981951b632414e47 2024-11-28T09:23:54,977 INFO [M:0;363d8d38a970:34825 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/34f0ef1792474e05981951b632414e47, entries=189, sequenceid=2311, filesize=45.7 K 2024-11-28T09:23:54,978 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e8413b6efd5744a98814daeaf12520bb as hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8413b6efd5744a98814daeaf12520bb 2024-11-28T09:23:54,981 INFO [M:0;363d8d38a970:34825 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:33549/user/jenkins/test-data/46660578-87f2-e15d-ff3b-88439ec55532/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e8413b6efd5744a98814daeaf12520bb, entries=1, sequenceid=2311, filesize=5.0 K 2024-11-28T09:23:54,982 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegion(3040): Finished flush of dataSize ~805.36 KB/824691, heapSize ~992.50 KB/1016320, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 617ms, sequenceid=2311, compaction requested=false 2024-11-28T09:23:54,990 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T09:23:54,990 DEBUG [M:0;363d8d38a970:34825 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-28T09:23:54,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42869 is added to blk_1073741830_1006 (size=976185) 2024-11-28T09:23:54,993 INFO [M:0;363d8d38a970:34825 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-28T09:23:54,993 INFO [M:0;363d8d38a970:34825 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:34825 2024-11-28T09:23:54,994 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-28T09:23:54,996 DEBUG [M:0;363d8d38a970:34825 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/363d8d38a970,34825,1732785659868 already deleted, retry=false 2024-11-28T09:23:55,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T09:23:55,099 INFO [M:0;363d8d38a970:34825 {}] regionserver.HRegionServer(1307): Exiting; stopping=363d8d38a970,34825,1732785659868; zookeeper connection closed. 
2024-11-28T09:23:55,099 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34825-0x1003d6e958f0000, quorum=127.0.0.1:53251, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T09:23:55,117 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10ba49e9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T09:23:55,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@661c2e9c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T09:23:55,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T09:23:55,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2ca71a25{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T09:23:55,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@134e7cc5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.log.dir/,STOPPED} 2024-11-28T09:23:55,125 WARN [BP-2046945961-172.17.0.2-1732785656987 heartbeating to localhost/127.0.0.1:33549 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-28T09:23:55,125 WARN [BP-2046945961-172.17.0.2-1732785656987 heartbeating to localhost/127.0.0.1:33549 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2046945961-172.17.0.2-1732785656987 (Datanode Uuid e2f946b8-9461-43ef-9988-922692e43057) service to localhost/127.0.0.1:33549 2024-11-28T09:23:55,129 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-28T09:23:55,129 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-28T09:23:55,129 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data1/current/BP-2046945961-172.17.0.2-1732785656987 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T09:23:55,129 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/cluster_759b5d56-d6ac-c9cb-81a4-eabfc0e7d3de/dfs/data/data2/current/BP-2046945961-172.17.0.2-1732785656987 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T09:23:55,130 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-28T09:23:55,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T09:23:55,164 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T09:23:55,164 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T09:23:55,164 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T09:23:55,165 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/b02eea69-620e-c121-967e-25e43ad9589c/hadoop.log.dir/,STOPPED} 2024-11-28T09:23:55,197 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-28T09:23:55,433 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down